Columns (type and value range):
repo_name: string, length 6 to 100
path: string, length 4 to 294
copies: string, length 1 to 5
size: string, length 4 to 6
content: string, length 606 to 896k
license: string, 15 classes
var_hash: int64, -9,223,186,179,200,150,000 to 9,223,291,175B
doc_hash: int64, -9,223,304,365,658,930,000 to 9,223,309,051B
line_mean: float64, 3.5 to 99.8
line_max: int64, 13 to 999
alpha_frac: float64, 0.25 to 0.97
autogenerated: bool, 1 class
repo_name: agusc/scrapy | path: scrapy/extensions/logstats.py | copies: 127 | size: 1715
import logging
from twisted.internet import task
from scrapy.exceptions import NotConfigured
from scrapy import signals
logger = logging.getLogger(__name__)
class LogStats(object):
"""Log basic scraping stats periodically"""
def __init__(self, stats, interval=60.0):
self.stats = stats
self.interval = interval
self.multiplier = 60.0 / self.interval
@classmethod
def from_crawler(cls, crawler):
interval = crawler.settings.getfloat('LOGSTATS_INTERVAL')
if not interval:
raise NotConfigured
o = cls(crawler.stats, interval)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_opened(self, spider):
self.pagesprev = 0
self.itemsprev = 0
self.task = task.LoopingCall(self.log, spider)
self.task.start(self.interval)
def log(self, spider):
items = self.stats.get_value('item_scraped_count', 0)
pages = self.stats.get_value('response_received_count', 0)
irate = (items - self.itemsprev) * self.multiplier
prate = (pages - self.pagesprev) * self.multiplier
self.pagesprev, self.itemsprev = pages, items
msg = ("Crawled %(pages)d pages (at %(pagerate)d pages/min), "
"scraped %(items)d items (at %(itemrate)d items/min)")
log_args = {'pages': pages, 'pagerate': prate,
'items': items, 'itemrate': irate}
logger.info(msg, log_args, extra={'spider': spider})
def spider_closed(self, spider, reason):
if self.task.running:
self.task.stop()
license: bsd-3-clause | var_hash: 406,564,588,185,147,970 | doc_hash: 3,895,849,934,568,415,700 | line_mean: 32.627451 | line_max: 78 | alpha_frac: 0.63207 | autogenerated: false
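A minimal usage sketch for the extension above, assuming a standard Scrapy project: from_crawler reads the LOGSTATS_INTERVAL setting, and an unset or zero value raises NotConfigured so the extension stays off. The value below is illustrative.

# settings.py (illustrative)
LOGSTATS_INTERVAL = 60.0  # seconds between "Crawled N pages ... scraped M items" log lines
# LOGSTATS_INTERVAL = 0   # would disable the extension (from_crawler raises NotConfigured)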
repo_name: keithroe/vtkoptix | path: IO/XML/Testing/Python/TestXMLUnstructuredGridIO.py | copies: 23 | size: 2784
#!/usr/bin/env python
import os
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.misc import vtkGetTempDir
VTK_DATA_ROOT = vtkGetDataRoot()
VTK_TEMP_DIR = vtkGetTempDir()
file0 = VTK_TEMP_DIR + '/ugFile0.vtu'
file1 = VTK_TEMP_DIR + '/ugFile1.vtu'
file2 = VTK_TEMP_DIR + '/ugFile2.vtu'
# read in some unstructured grid data
ugReader = vtk.vtkUnstructuredGridReader()
ugReader.SetFileName(VTK_DATA_ROOT + "/Data/blow.vtk")
ugReader.SetScalarsName("thickness9")
ugReader.SetVectorsName("displacement9")
extract = vtk.vtkExtractUnstructuredGridPiece()
extract.SetInputConnection(ugReader.GetOutputPort())
# write various versions
ugWriter = vtk.vtkXMLUnstructuredGridWriter()
ugWriter.SetFileName(file0)
ugWriter.SetDataModeToAscii()
ugWriter.SetInputConnection(ugReader.GetOutputPort())
ugWriter.Write()
ugWriter.SetFileName(file1)
ugWriter.SetInputConnection(extract.GetOutputPort())
ugWriter.SetDataModeToAppended()
ugWriter.SetNumberOfPieces(2)
ugWriter.Write()
ugWriter.SetFileName(file2)
ugWriter.SetDataModeToBinary()
ugWriter.SetGhostLevel(2)
ugWriter.Write()
# read the ASCII version
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(file0)
reader.Update()
ug0 = vtk.vtkUnstructuredGrid()
ug0.DeepCopy(reader.GetOutput())
sF = vtk.vtkDataSetSurfaceFilter()
sF.SetInputData(ug0)
mapper0 = vtk.vtkPolyDataMapper()
mapper0.SetInputConnection(sF.GetOutputPort())
actor0 = vtk.vtkActor()
actor0.SetMapper(mapper0)
actor0.SetPosition(0, 40, 20)
# read appended piece 0
reader.SetFileName(file1)
sF1 = vtk.vtkDataSetSurfaceFilter()
sF1.SetInputConnection(reader.GetOutputPort())
mapper1 = vtk.vtkPolyDataMapper()
mapper1.SetInputConnection(sF1.GetOutputPort())
mapper1.SetPiece(1)
mapper1.SetNumberOfPieces(2)
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper1)
# read binary piece 0 (with ghost level)
reader2 = vtk.vtkXMLUnstructuredGridReader()
reader2.SetFileName(file2)
sF2 = vtk.vtkDataSetSurfaceFilter()
sF2.SetInputConnection(reader2.GetOutputPort())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(sF2.GetOutputPort())
mapper2.SetPiece(1)
mapper2.SetNumberOfPieces(2)
mapper2.SetGhostLevel(2)
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.SetPosition(0, 0, 30)
# Create the RenderWindow, Renderer and both Actors
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren.AddActor(actor0)
ren.AddActor(actor1)
ren.AddActor(actor2)
ren.ResetCamera()
ren.GetActiveCamera().SetPosition(180, 55, 65)
ren.GetActiveCamera().SetFocalPoint(3.5, 32, 15)
renWin.SetSize(300, 300)
renWin.Render()
#os.remove(file0)
#os.remove(file1)
#os.remove(file2)
license: bsd-3-clause | var_hash: -7,984,010,914,604,030,000 | doc_hash: -1,494,365,995,004,313,000 | line_mean: 23.208696 | line_max: 61 | alpha_frac: 0.795977 | autogenerated: false
repo_name: foursquare/pants | path: contrib/go/src/python/pants/contrib/go/tasks/go_test.py | copies: 1 | size: 2117
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import filter
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoTest(GoWorkspaceTask):
"""Runs `go test` on Go packages.
To run a library's tests, GoTest only requires a Go workspace to be initialized
(see GoWorkspaceTask) with links to necessary source files. It does not require
GoCompile to first compile the library to be tested -- in fact, GoTest will ignore
any binaries in "$GOPATH/pkg/", because Go test files (which live in the package
they are testing) are ignored in normal compilation, so Go test must compile everything
from scratch.
"""
@classmethod
def register_options(cls, register):
super(GoTest, cls).register_options(register)
register('--build-and-test-flags', default='',
fingerprint=True,
help='Flags to pass in to `go test` tool.')
@classmethod
def supports_passthru_args(cls):
return True
def execute(self):
# Only executes the tests from the package specified by the target roots, so
# we don't run the tests for _all_ dependencies of said package.
targets = filter(self.is_local_src, self.context.target_roots)
for target in targets:
self.ensure_workspace(target)
self._go_test(target)
def _go_test(self, target):
args = (self.get_options().build_and_test_flags.split()
+ [target.import_path]
+ self.get_passthru_args())
result, go_cmd = self.go_dist.execute_go_cmd('test', gopath=self.get_gopath(target), args=args,
workunit_factory=self.context.new_workunit,
workunit_labels=[WorkUnitLabel.TEST])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
license: apache-2.0 | var_hash: 1,722,197,371,177,485,300 | doc_hash: -3,586,717,737,596,575,000 | line_mean: 38.943396 | line_max: 99 | alpha_frac: 0.683042 | autogenerated: false
repo_name: VishvajitP/django-extensions | path: django_extensions/utils/validatingtemplatetags.py | copies: 26 | size: 2469
from django.template import defaulttags
from django.template.base import Library, Node
from django.templatetags import future
register = Library()
error_on_old_style_url_tag = False
new_style_url_tag = False
errors = []
def before_new_template(force_new_urls):
"""Reset state ready for new template"""
global new_style_url_tag, error_on_old_style_url_tag, errors
new_style_url_tag = False
error_on_old_style_url_tag = force_new_urls
errors = []
def get_template_errors():
return errors
# Disable extends and include as they are not needed, slow parsing down, and cause duplicate errors
class NoOpNode(Node):
def render(self, context):
return ''
@register.tag
def extends(parser, token):
return NoOpNode()
@register.tag
def include(parser, token):
return NoOpNode()
# We replace load to determine whether new style urls are in use and re-patch url after
# a future version is loaded
@register.tag
def load(parser, token):
global new_style_url_tag
bits = token.contents.split()
reloaded_url_tag = False
if len(bits) >= 4 and bits[-2] == "from" and bits[-1] == "future":
for name in bits[1:-2]:
if name == "url":
new_style_url_tag = True
reloaded_url_tag = True
try:
return defaulttags.load(parser, token)
finally:
if reloaded_url_tag:
parser.tags['url'] = new_style_url
@register.tag(name='url')
def old_style_url(parser, token):
global error_on_old_style_url_tag
bits = token.split_contents()
view = bits[1]
if error_on_old_style_url_tag:
_error("Old style url tag used (only reported once per file): {%% %s %%}" % (" ".join(bits)), token)
error_on_old_style_url_tag = False
if view[0] in "\"'" and view[0] == view[-1]:
_error("Old style url tag with quotes around view name: {%% %s %%}" % (" ".join(bits)), token)
return defaulttags.url(parser, token)
def new_style_url(parser, token):
bits = token.split_contents()
view = bits[1]
if view[0] not in "\"'" or view[0] != view[-1]:
_error("New style url tag without quotes around view name: {%% %s %%}" % (" ".join(bits)), token)
return future.url(parser, token)
def _error(message, token):
origin, (start, upto) = token.source
source = origin.reload()
line = source.count("\n", 0, start) + 1 # 1 based line numbering
errors.append((origin, line, message))
license: mit | var_hash: 4,216,780,764,316,967,000 | doc_hash: 6,073,189,319,554,758,000 | line_mean: 25.836957 | line_max: 108 | alpha_frac: 0.63467 | autogenerated: false
repo_name: mxOBS/deb-pkg_trusty_chromium-browser | path: tools/telemetry/telemetry/core/discover.py | copies: 12 | size: 4024
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import inspect
import os
import re
from telemetry import decorators
from telemetry.core import camel_case
@decorators.Cache
def DiscoverModules(start_dir, top_level_dir, pattern='*'):
"""Discover all modules in |start_dir| which match |pattern|.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
pattern: Unix shell-style pattern for filtering the filenames to import.
Returns:
list of modules.
"""
modules = []
for dir_path, _, filenames in os.walk(start_dir):
for filename in filenames:
# Filter out unwanted filenames.
if filename.startswith('.') or filename.startswith('_'):
continue
if os.path.splitext(filename)[1] != '.py':
continue
if not fnmatch.fnmatch(filename, pattern):
continue
# Find the module.
module_rel_path = os.path.relpath(os.path.join(dir_path, filename),
top_level_dir)
module_name = re.sub(r'[/\\]', '.', os.path.splitext(module_rel_path)[0])
# Import the module.
try:
module = __import__(module_name, fromlist=[True])
except ImportError:
continue
modules.append(module)
return modules
# TODO(dtu): Normalize all discoverable classes to have corresponding module
# and class names, then always index by class name.
@decorators.Cache
def DiscoverClasses(start_dir, top_level_dir, base_class, pattern='*',
index_by_class_name=False):
"""Discover all classes in |start_dir| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
base_class: The base class to search for.
pattern: Unix shell-style pattern for filtering the filenames to import.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
modules = DiscoverModules(start_dir, top_level_dir, pattern)
classes = {}
for module in modules:
new_classes = DiscoverClassesInModule(
module, base_class, index_by_class_name)
classes = dict(classes.items() + new_classes.items())
return classes
@decorators.Cache
def DiscoverClassesInModule(module, base_class, index_by_class_name=False):
"""Discover all classes in |module| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
module: The module to search.
base_class: The base class to search for.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
classes = {}
for _, obj in inspect.getmembers(module):
# Ensure object is a class.
if not inspect.isclass(obj):
continue
# Include only subclasses of base_class.
if not issubclass(obj, base_class):
continue
# Exclude the base_class itself.
if obj is base_class:
continue
# Exclude protected or private classes.
if obj.__name__.startswith('_'):
continue
# Include only the module in which the class is defined.
# If a class is imported by another module, exclude those duplicates.
if obj.__module__ != module.__name__:
continue
if index_by_class_name:
key_name = camel_case.ToUnderscore(obj.__name__)
else:
key_name = module.__name__.split('.')[-1]
classes[key_name] = obj
return classes
_counter = [0]
def _GetUniqueModuleName():
_counter[0] += 1
return "module_" + str(_counter[0])
license: bsd-3-clause | var_hash: -481,233,330,400,601,340 | doc_hash: 4,329,747,723,424,590,300 | line_mean: 31.192 | line_max: 79 | alpha_frac: 0.676193 | autogenerated: false
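A hypothetical usage sketch for the discovery helpers above; the directory name and the stand-in base class are illustrative, not taken from the file.

class _Base(object):
    """Stand-in base class used only for this illustration."""
    pass

# Find every _Base subclass defined in .py modules under 'plugins/', importing them
# relative to the current directory; with index_by_class_name=True the returned dict
# is keyed by class names converted to lowercase_with_underscores.
found = DiscoverClasses('plugins', '.', _Base, index_by_class_name=True)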
repo_name: bodylabs/blmath | path: blmath/geometry/transform/correspondence.py | copies: 1 | size: 2095
# FIXME -- move back to core
def apply_correspondence(correspondence_src, correspondence_dst, vertices):
"""
Apply a correspondence defined between two vertex sets to a new set.
Identifies a correspondence between `correspondence_src` and
`correspondence_dst` then applies that correspondence to `vertices`.
That is, `correspondence_src` is to `correspondence_dst` as `vertices` is
to [ return value ].
`correspondence_src` and `vertices` must have the same topology. The return
value will have the same topology as `correspondence_dst`. Arguments can
be passed as `chumpy` or `numpy` arrays.
The most common usecase here is establishing a relationship between an
alignment and a pointcloud or set of landmarks. The pointcloud or landmarks
can then be moved automatically as the alignment is adjusted (e.g. fit to a
different mesh, reposed, etc).
Args:
correspondence_src: The source vertices for the correspondence
correspondence_dst: The destination vertices for the correspondence
vertices: The vertices to map using the defined correspondence
Returns:
the mapped version of `vertices`
Example usage
-------------
>>> transformed_scan_vertices = apply_correspondence(
... correspondence_src=alignment.v,
... correspondence_dst=scan.v,
... vertices=reposed_alignment.v
... )
>>> transformed_scan = Mesh(v=transformed_scan_vertices, vc=scan.vc)
"""
import chumpy as ch
from bodylabs.mesh.landmarking.transformed_lm import TransformedCoeffs
from bodylabs.mesh.landmarking.transformed_lm import TransformedLms
ch_desired = any([
isinstance(correspondence_src, ch.Ch),
isinstance(correspondence_dst, ch.Ch),
isinstance(vertices, ch.Ch),
])
coeffs = TransformedCoeffs(
src_v=correspondence_src, dst_v=correspondence_dst)
transformed_vertices = TransformedLms(
transformed_coeffs=coeffs, src_v=vertices)
return transformed_vertices if ch_desired else transformed_vertices.r
license: bsd-2-clause | var_hash: -6,639,314,551,644,220,000 | doc_hash: 1,094,212,118,123,991,300 | line_mean: 36.410714 | line_max: 79 | alpha_frac: 0.705967 | autogenerated: false
repo_name: shadyueh/pyranking | path: env/lib/python2.7/site-packages/setuptools/sandbox.py | copies: 259 | size: 13925
import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools import compat
from setuptools.compat import builtins
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@classmethod
def dump(cls, type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
compat.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
"""
pattern = re.compile('(setuptools|pkg_resources|distutils)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
def runner():
ns = dict(__file__=setup_script, __name__='__main__')
_execfile(setup_script, ns)
DirectorySandbox(setup_dir).run(runner)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self,name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source,name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
return func()
finally:
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os,name)
def wrap(self,src,dst,*args,**kw):
if self._active:
src,dst = self._remap_pair(name,src,dst,*args,**kw)
return original(src,dst,*args,**kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return original(path,*args,**kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return self._remap_output(name, original(path,*args,**kw))
return original(path,*args,**kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os,name)
def wrap(self,*args,**kw):
retval = original(*args,**kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os,name): locals()[name] = _mk_query(name)
def _validate_path(self,path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self,operation,path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation+'-from',src,*args,**kw),
self._remap_input(operation+'-to',dst,*args,**kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
try:
from win32com.client.gencache import GetGeneratePath
_EXCEPTIONS.append(GetGeneratePath())
del GetGeneratePath
except ImportError:
# it appears pywin32 is not installed, so no need to exclude.
pass
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox,'')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path,mode,*args,**kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path,mode,*args,**kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file,flags,mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
license: mit | var_hash: -2,776,341,567,874,974,000 | doc_hash: -8,297,946,961,979,982,000 | line_mean: 27.476483 | line_max: 105 | alpha_frac: 0.591023 | autogenerated: false
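A hypothetical usage sketch for the sandbox module above; the path and command arguments are illustrative.

# Execute a package's setup.py with filesystem writes restricted to its own directory;
# a write outside that directory (or the declared exceptions) raises SandboxViolation.
run_setup('/tmp/example-pkg/setup.py', ['--quiet', 'bdist_egg'])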
repo_name: flavour/helios | path: controllers/org.py | copies: 3 | size: 5103
# -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
@author: Fran Boon
@author: Michael Howden
"""
module = request.controller
resourcename = request.function
if not deployment_settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# Options Menu (available in all Functions' Views)
s3_menu(module)
# =============================================================================
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
def sector():
""" RESTful CRUD controller """
#tablename = "%s_%s" % (module, resourcename)
#table = db[tablename]
return s3_rest_controller(module, resourcename)
# -----------------------------------------------------------------------------
def subsector():
""" RESTful CRUD controller """
#tablename = "%s_%s" % (module, resourcename)
#table = db[tablename]
return s3_rest_controller(module, resourcename)
# =============================================================================
def site():
""" RESTful CRUD controller """
return s3_rest_controller(module, resourcename)
# -----------------------------------------------------------------------------
def site_org_json():
table = db.org_site
otable = db.org_organisation
response.headers["Content-Type"] = "application/json"
#db.req_commit.date.represent = lambda dt: dt[:10]
query = (table.site_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(otable.id,
otable.name)
return records.json()
# =============================================================================
def organisation():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
#return response.s3.organisation_controller()
return organisation_controller()
# -----------------------------------------------------------------------------
def organisation_list_represent(l):
if l:
max = 4
if len(l) > max:
count = 1
for x in l:
if count == 1:
output = organisation_represent(x)
elif count > max:
return "%s, etc" % output
else:
output = "%s, %s" % (output, organisation_represent(x))
count += 1
else:
return ", ".join([organisation_represent(x) for x in l])
else:
return NONE
# =============================================================================
def office():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
#return response.s3.office_controller()
return office_controller()
# =============================================================================
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
s3mgr.show_ids = True
return True
response.s3.prep = prep
return s3_rest_controller("pr", "person")
# =============================================================================
def room():
""" RESTful CRUD controller """
return s3_rest_controller(module, resourcename)
# =============================================================================
def incoming():
""" Incoming Shipments """
s3mgr.load("inv_inv_item")
return response.s3.inv_incoming()
# =============================================================================
def req_match():
""" Match Requests """
s3mgr.load("req_req")
return response.s3.req_match()
# =============================================================================
def donor():
""" RESTful CRUD controller """
tablename = "org_donor"
table = db[tablename]
tablename = "org_donor"
s3.crud_strings[tablename] = Storage(
title_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
title_search = T("Search Donors"),
subtitle_create = T("Add New Donor"),
subtitle_list = T("Donors"),
label_list_button = T("List Donors"),
label_create_button = ADD_DONOR,
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
s3mgr.configure(tablename, listadd=False)
output = s3_rest_controller(module, resourcename)
return output
# END =========================================================================
license: mit | var_hash: -4,555,192,975,244,885,500 | doc_hash: 6,751,883,772,538,613,000 | line_mean: 30.5 | line_max: 79 | alpha_frac: 0.472075 | autogenerated: false
repo_name: gerrit-review/gerrit | path: tools/js/bowerutil.py | copies: 1 | size: 1488
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def hash_bower_component(hash_obj, path):
"""Hash the contents of a bower component directory.
This is a stable hash of a directory downloaded with `bower install`, minus
the .bower.json file, which is autogenerated each time by bower. Used in lieu
of hashing a zipfile of the contents, since zipfiles are difficult to hash in
a stable manner.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the directory to hash.
Returns:
The passed-in hash_obj.
"""
if not os.path.isdir(path):
raise ValueError('Not a directory: %s' % path)
path = os.path.abspath(path)
for root, dirs, files in os.walk(path):
dirs.sort()
for f in sorted(files):
if f == '.bower.json':
continue
p = os.path.join(root, f)
hash_obj.update(p[len(path)+1:])
hash_obj.update(open(p).read())
return hash_obj
license: apache-2.0 | var_hash: 819,121,664,621,593,500 | doc_hash: -4,617,297,432,305,759,000 | line_mean: 31.347826 | line_max: 79 | alpha_frac: 0.704301 | autogenerated: false
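A hypothetical usage sketch for hash_bower_component above; the component path is illustrative.

import hashlib

# Stable digest of a directory fetched with `bower install`, ignoring .bower.json.
digest = hash_bower_component(hashlib.sha1(), 'bower_components/polymer').hexdigest()
print(digest)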
repo_name: yugang/web-testing-service | path: wts/tests/csp/csp_sandbox_empty_int-manual.py | copies: 30 | size: 2669
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "sandbox "
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.sandbox
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_sandbox_empty_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#sandbox-optional"/>
<meta name="flags" content=""/>
<meta name="assert" content="sandbox allow-scripts"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no</strong> text "FAIL" below.</p>
<div id="test" style="display:red"></div>
<script src="support/csp.js"></script>
<script>
if (X) {
document.getElementById("test").innerHTML = "FAIL";
}
</script>
</body>
</html> """
license: bsd-3-clause | var_hash: 6,748,569,873,725,359,000 | doc_hash: 3,962,376,105,736,616,000 | line_mean: 41.365079 | line_max: 89 | alpha_frac: 0.707006 | autogenerated: false
repo_name: reminisce/mxnet | path: benchmark/opperf/utils/op_registry_utils.py | copies: 2 | size: 13293
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities to interact with MXNet operator registry."""
from operator import itemgetter
from mxnet import runtime
import mxnet as mx
from benchmark.opperf.rules.default_params import DEFAULTS_INPUTS, MX_OP_MODULE
# Operators where parameter have special criteria that cannot be cleanly automated.
# Example: sample_multinomial operator has a parameter 'data'. It expects values to sum up to 1.
unique_ops = ("sample_multinomial",)
def _select_ops(operator_names, filters=("_contrib", "_"), merge_op_forward_backward=True):
"""From a given list of operators, filter out all operator names starting with given filters and prepares
a dictionary of operator with attributes - 'has_backward' and 'nd_op_handle = mxnet.ndarray.op'
By default, merge forward and backward operators for a given op into one operator and sets the attribute
'has_backward' for the operator.
By default, filter out all Contrib operators that starts with '_contrib' and internal operators that
starts with '_'.
Note - All deprecated operators are filtered out as well.
Parameters
----------
operator_names: List[str]
List of operator names.
filters: Tuple(str)
Tuple of filters to apply on operator names.
merge_op_forward_backward: Boolean, Default - True
Merge forward and backward operators for a given op in to one op.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle"}}
"""
mx_operators = {}
operators_with_backward = []
# Filter out deprecated operators
filters += ("normal", "uniform", "BatchNorm_v1", "Flatten", "contrib_CTCLoss", "Pad", "Cast",
"Pooling_v1", "Concat", "Reshape", "Convolution_v1", "SliceChannel", "Crop",
"crop", "onehot_encode")
if merge_op_forward_backward:
filters += ("_backward",)
for cur_op_name in operator_names:
if not cur_op_name.startswith(filters):
mx_operators[cur_op_name] = {"has_backward": False,
"nd_op_handle": getattr(MX_OP_MODULE, cur_op_name)}
if cur_op_name.startswith("_backward_"):
operators_with_backward.append(cur_op_name)
if merge_op_forward_backward:
# Identify all operators that can run backward.
for op_with_backward in operators_with_backward:
op_name = op_with_backward.split("_backward_")[1]
if op_name in mx_operators:
mx_operators[op_name]["has_backward"] = True
return mx_operators
def _set_op_arguments(mx_operators):
"""Fetch and set operator arguments - nargs, arg_names, arg_types
"""
for op_name in mx_operators:
operator_arguments = mx.operator.get_operator_arguments(op_name)
mx_operators[op_name]["params"] = {"narg": operator_arguments.narg,
"arg_names": operator_arguments.names,
"arg_types": operator_arguments.types}
def _get_all_mxnet_operators():
# Step 1 - Get all registered op names and filter it
operator_names = mx.operator.get_all_registered_operators()
mx_operators = _select_ops(operator_names)
# Step 2 - Get all parameters for the operators
_set_op_arguments(mx_operators)
return mx_operators
def prepare_op_inputs(arg_params, arg_values):
inputs = []
for arg_value in arg_values:
inp = {}
for arg_name in arg_params["params"]["arg_names"]:
if arg_name in arg_value:
inp[arg_name] = arg_value[arg_name]
inputs.append(inp)
return inputs
def prepare_op_inputs(op, arg_params):
inputs = []
# 4d tensor is needed only by following two ops
ops_4d = ['depth_to_space','space_to_depth']
# Prepare op to default input mapping
arg_values = {}
for arg_name, arg_type in zip(arg_params["params"]["arg_names"],
arg_params["params"]["arg_types"]):
if "NDArray" in arg_type and arg_name + "_nd" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_nd"]
elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_4d"]
elif arg_name in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name]
elif "float" in arg_type and arg_name + "_float" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_float"]
elif "Shape" in arg_type and arg_name + "_shape" in DEFAULTS_INPUTS:
# This is for cases where in some ops 'axis' is Int in some ops a shape tuple.
# Ex: axis in sum is shape, axis in sort is int.
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_shape"]
# Number of different inputs we want to use to test
# the operator
num_input_combinations = max([len(value) for value in arg_values.values()])
# Prepare key/value args for param to input value
for idx in range(num_input_combinations):
inp = {}
for arg_name in arg_params["params"]["arg_names"]:
if arg_name in arg_values:
if len(arg_values[arg_name]) == num_input_combinations:
inp[arg_name] = arg_values[arg_name][idx]
else:
# This is required when we want to use a param same across all
# input combination. Example: keeping low and high same for random sampling
# operator for all different types of Tensor shape.
inp[arg_name] = arg_values[arg_name][0]
inputs.append(inp)
return inputs
def get_all_unary_operators():
"""Gets all Unary operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for unary broadcast operators
unary_broadcast_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_params["params"]["narg"] == 1 and \
"data" in op_params["params"]["arg_names"]:
unary_broadcast_mx_operators[op_name] = mx_operators[op_name]
return unary_broadcast_mx_operators
def get_all_broadcast_binary_operators():
"""Gets all binary broadcast operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for binary broadcast operators
binary_broadcast_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name.startswith("broadcast_") and op_params["params"]["narg"] == 2 and \
"lhs" in op_params["params"]["arg_names"] and \
"rhs" in op_params["params"]["arg_names"]:
binary_broadcast_mx_operators[op_name] = mx_operators[op_name]
return binary_broadcast_mx_operators
def get_all_elemen_wise_binary_operators():
"""Gets all binary elemen_wise operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for binary elemen_wise operators
binary_elemen_wise_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name.startswith("elemwise_") and op_params["params"]["narg"] == 2 and \
"lhs" in op_params["params"]["arg_names"] and \
"rhs" in op_params["params"]["arg_names"]:
binary_elemen_wise_mx_operators[op_name] = mx_operators[op_name]
return binary_elemen_wise_mx_operators
def get_all_random_sampling_operators():
"""Gets all Random Sampling operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Random Sampling operators
random_sampling_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name.startswith(("random_", "sample_")) and op_name not in unique_ops:
random_sampling_mx_operators[op_name] = mx_operators[op_name]
return random_sampling_mx_operators
def get_all_reduction_operators():
"""Gets all Reduction operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Reduction operators
reduction_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_params["params"]["narg"] == 4 and \
set(["data", "axis", "exclude", "keepdims"]).issubset(set(op_params["params"]["arg_names"])) \
and op_name not in unique_ops:
reduction_mx_operators[op_name] = mx_operators[op_name]
return reduction_mx_operators
def get_all_optimizer_operators():
"""Gets all Optimizer operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
optimizer_ops = ['mp_sgd_update', 'signum_update', 'rmspropalex_update', 'ftml_update', 'rmsprop_update',
'sgd_mom_update', 'signsgd_update', 'mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
'adam_update']
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Optimizer operators
optimizer_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in optimizer_ops and op_name not in unique_ops:
optimizer_mx_operators[op_name] = mx_operators[op_name]
return optimizer_mx_operators
def get_all_sorting_searching_operators():
"""Gets all Sorting and Searching operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
sort_search_ops = ['sort', 'argsort', 'argmax', 'argmin', 'topk']
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Sort and search operators
sort_search_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in sort_search_ops and op_name not in unique_ops:
sort_search_mx_operators[op_name] = mx_operators[op_name]
return sort_search_mx_operators
def get_all_rearrange_operators():
"""Gets all array rearrange operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
rearrange_ops = ['transpose','swapaxes','flip','depth_to_space','space_to_depth']
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Array Rearrange operators
rearrange_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in rearrange_ops and op_name not in unique_ops:
rearrange_mx_operators[op_name] = mx_operators[op_name]
return rearrange_mx_operators
def get_operators_with_no_benchmark(operators_with_benchmark):
"""Gets all MXNet operators with not benchmark.
Retrieve all operators registered with MXNet and prepares a list of operators that are not part of given
operators with benchmark list.
Parameters
----------
operators_with_benchmark: list[Str]
List of operator names that has benchmarks
Returns
-------
list[Str]
List of operator names that is registered with MXNet but has no benchmarks.
"""
all_mxnet_operators = _get_all_mxnet_operators().keys()
return list(set(all_mxnet_operators) - set(operators_with_benchmark))
def get_current_runtime_features():
"""Get all current runtime time flags/configuration for MXNet.
Returns
-------
Map of current runtime features such as compile flags used by MXNet.
Example: {'runtime_features': {'OPENCV' : '✔ OPENCV', 'CUDA': '✖ CUDA'}}
"""
features = runtime.Features()
runtime_features = {}
for feature, config in sorted(features.items(), key=itemgetter(0)):
runtime_features[feature] = config
return {'runtime_features': runtime_features}
license: apache-2.0 | var_hash: -6,439,189,346,956,220,000 | doc_hash: 3,602,274,522,490,329,000 | line_mean: 36.860399 | line_max: 110 | alpha_frac: 0.637746 | autogenerated: false
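A hypothetical usage sketch for the registry utilities above; the operator name is illustrative.

# Enumerate unary operators registered with MXNet and build the default input
# combinations for one of them from DEFAULTS_INPUTS.
unary_ops = get_all_unary_operators()
exp_inputs = prepare_op_inputs('exp', unary_ops['exp'])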
repo_name: sinhrks/seaborn | path: seaborn/matrix.py | copies: 5 | size: 40890
"""Functions to visualize matrices of data."""
import itertools
import colorsys
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from .axisgrid import Grid
from .palettes import cubehelix_palette
from .utils import despine, axis_ticklabels_overlap
from .external.six.moves import range
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(str, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(str, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
"""Convert either a list of colors or nested lists of colors to RGB."""
to_rgb = mpl.colors.colorConverter.to_rgb
try:
to_rgb(colors[0])
# If this works, there is only one level of colors
return list(map(to_rgb, colors))
except ValueError:
# If we get here, we have nested lists
return [list(map(to_rgb, l)) for l in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
if not mask.index.equals(data.index) \
and mask.columns.equals(data.columns):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
# Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
# Reverse the rows so the plot looks like the matrix
plot_data = plot_data[::-1]
data = data.ix[::-1]
mask = mask.ix[::-1]
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int) and xticklabels > 1:
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and xticklabels:
xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and not xticklabels:
xticklabels = ['' for _ in range(data.shape[1])]
ytickevery = 1
if isinstance(yticklabels, int) and yticklabels > 1:
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and yticklabels:
yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and not yticklabels:
yticklabels = ['' for _ in range(data.shape[0])]
else:
yticklabels = yticklabels[::-1]
# Get the positions and used label for the ticks
nx, ny = data.T.shape
xstart, xend, xstep = 0, nx, xtickevery
self.xticks = np.arange(xstart, xend, xstep) + .5
self.xticklabels = xticklabels[xstart:xend:xstep]
ystart, yend, ystep = (ny - 1) % ytickevery, ny, ytickevery
self.yticks = np.arange(ystart, yend, ystep) + .5
self.yticklabels = yticklabels[ystart:yend:ystep]
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
self.divergent = divergent
# Now add in the centering value and set the limits
vmin += center
vmax += center
self.vmin = vmin
self.vmax = vmax
# Choose default colormaps if not provided
if cmap is None:
if divergent:
self.cmap = "RdBu_r"
else:
self.cmap = cubehelix_palette(light=.95, as_cmap=True)
else:
self.cmap = cmap
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, val, color in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors()):
if val is not np.ma.masked:
_, l, _ = colorsys.rgb_to_hls(*color[:3])
text_color = ".15" if l > .5 else "w"
val = ("{:" + self.fmt + "}").format(val)
ax.text(x, y, val, color=text_color,
ha="center", va="center", **self.annot_kws)
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
ax.set(xticks=self.xticks, yticks=self.yticks)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
# Possibly add a colorbar
if self.cbar:
ticker = mpl.ticker.MaxNLocator(6)
cb = ax.figure.colorbar(mesh, cax, ax,
ticks=ticker, **self.cbar_kws)
cb.outline.set_linewidth(0)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=False, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, ax=None, xticklabels=True, yticklabels=True,
mask=None,
**kwargs):
"""Plot rectangular data as a color-encoded matrix.
This function tries to infer a good colormap to use from the data, but
this is not guaranteed to work, so take care to make sure the kind of
colormap (sequential or diverging) and its limits are appropriate.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
one of these values may be ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will be either a cubehelix map (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool, optional
If True, write the data value in each cell.
fmt : string, optional
String formatting code to use when ``annot`` is True.
annot_kws : dict of key, value mappings, optional
Keyword arguments for ``ax.text`` when ``annot`` is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : boolean, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for `fig.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : boolean, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
xticklabels : list-like, int, or bool, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
        xticklabels. If an integer, use the column names but plot only every
        nth label.
yticklabels : list-like, int, or bool, optional
If True, plot the row names of the dataframe. If False, don't plot
the row names. If list-like, plot these alternate labels as the
        yticklabels. If an integer, use the index names but plot only every
        nth label.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
kwargs : other keyword arguments
All other keyword arguments are passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
Examples
--------
Plot a heatmap for a numpy array:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(0)
>>> import seaborn as sns; sns.set()
>>> uniform_data = np.random.rand(10, 12)
>>> ax = sns.heatmap(uniform_data)
Change the limits of the colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
Plot a heatmap for data centered on 0:
.. plot::
:context: close-figs
>>> normal_data = np.random.randn(10, 12)
>>> ax = sns.heatmap(normal_data)
Plot a dataframe with meaningful row and column labels:
.. plot::
:context: close-figs
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> ax = sns.heatmap(flights)
Annotate each cell with the numeric value using integer formatting:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, annot=True, fmt="d")
Add lines between each cell:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, linewidths=.5)
Use a different colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cmap="YlGnBu")
Center the colormap at a specific value:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, center=flights.loc["January", 1955])
Plot every other column label and don't plot row labels:
.. plot::
:context: close-figs
>>> data = np.random.randn(50, 20)
>>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
Don't draw a colorbar:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cbar=False)
Use different axes for the colorbar:
.. plot::
:context: close-figs
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
>>> ax = sns.heatmap(flights, ax=ax,
... cbar_ax=cbar_ax,
... cbar_kws={"orientation": "horizontal"})
    Use a mask to plot only part of a matrix:
.. plot::
:context: close-figs
>>> corr = np.corrcoef(np.random.randn(10, 200))
>>> mask = np.zeros_like(corr)
>>> mask[np.triu_indices_from(mask)] = True
>>> with sns.axes_style("white"):
... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels, yticklabels,
mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
if self.rotate:
self.X = self.dendrogram['dcoord']
self.Y = self.dendrogram['icoord']
else:
self.X = self.dendrogram['icoord']
self.Y = self.dendrogram['dcoord']
def _calculate_linkage_scipy(self):
if np.product(self.shape) >= 10000:
            import warnings
            warnings.warn('This will be slow... (gentle suggestion: '
                          '"pip install fastcluster")', UserWarning)
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = hierarchy.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = fastcluster.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
        Kept as a separate function rather than a property because we don't
        want to recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_list=['k'], color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
for x, y in zip(self.X, self.Y):
ax.plot(x, y, color='k', linewidth=.5)
if self.rotate and self.axis == 0:
ax.invert_xaxis()
ax.yaxis.set_ticks_position('right')
ymax = min(map(min, self.Y)) + max(map(max, self.Y))
ax.set_ylim(0, ymax)
ax.invert_yaxis()
else:
xmax = min(map(min, self.X)) + max(map(max, self.X))
ax.set_xlim(0, xmax)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
plt.draw()
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, ax=None):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
"""
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax)
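# --- Hypothetical usage sketch (not part of the original file) --------------
# Shows how ``dendrogram`` and ``reordered_ind`` fit together; it assumes the
# module-level imports used throughout this file (``np``, ``pd``, ``plt``) and
# is purely illustrative.
def _dendrogram_usage_example():
    """Reorder a random matrix by its column dendrogram (illustrative only)."""
    data = pd.DataFrame(np.random.RandomState(0).randn(8, 6))
    plotter = dendrogram(data, axis=1, ax=plt.gca())
    # Columns reordered to match the dendrogram leaves
    return data.iloc[:, plotter.reordered_ind]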
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None, mask=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data = pd.DataFrame(data)
self.data2d = self.format_data(self.data, pivot_kws, z_score,
standard_scale)
self.mask = _matrix_mask(self.data2d, mask)
if figsize is None:
width, height = 10, 10
figsize = (width, height)
self.fig = plt.figure(figsize=figsize)
if row_colors is not None:
row_colors = _convert_colors(row_colors)
self.row_colors = row_colors
if col_colors is not None:
col_colors = _convert_colors(col_colors)
self.col_colors = col_colors
width_ratios = self.dim_ratios(self.row_colors,
figsize=figsize,
axis=1)
height_ratios = self.dim_ratios(self.col_colors,
figsize=figsize,
axis=0)
nrows = 3 if self.col_colors is None else 4
ncols = 3 if self.row_colors is None else 4
self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios)
self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2],
axisbg="white")
self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1],
axisbg="white")
self.ax_row_colors = None
self.ax_col_colors = None
if self.row_colors is not None:
self.ax_row_colors = self.fig.add_subplot(
self.gs[nrows - 1, ncols - 2])
if self.col_colors is not None:
self.ax_col_colors = self.fig.add_subplot(
self.gs[nrows - 2, ncols - 1])
self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
# colorbar for scale to left corner
self.cax = self.fig.add_subplot(self.gs[0, 0])
self.dendrogram_row = None
self.dendrogram_col = None
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
specified axis.
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
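    # Added illustrative note: with ``axis=1`` each column is z-scored, e.g. a
    # column [1, 2, 3] becomes [-1., 0., 1.] (the mean of 2 is subtracted, then
    # each value is divided by the sample standard deviation of 1).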
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
        standardized : pandas.DataFrame
            Normalized data scaled so each row or column (along the specified
            axis) ranges from 0 to 1.
>>> import numpy as np
>>> d = np.arange(5, 8, 0.5)
>>> ClusterGrid.standard_scale(d)
array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
"""Get the proportions of the figure taken up by each axes
"""
figdim = figsize[axis]
# Get resizing proportion of this figure for the dendrogram and
# colorbar, so only the heatmap gets bigger but the dendrogram stays
# the same size.
dendrogram = min(2. / figdim, .2)
# add the colorbar
colorbar_width = .8 * dendrogram
colorbar_height = .2 * dendrogram
if axis == 0:
ratios = [colorbar_width, colorbar_height]
else:
ratios = [colorbar_height, colorbar_width]
if side_colors is not None:
# Add room for the colors
ratios += [side_colors_ratio]
# Add the ratio for the heatmap itself
ratios += [.8]
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
"""
# check for nested lists/color palettes.
# Will fail if matplotlib color is list not tuple
if any(issubclass(type(x), list) for x in colors):
all_colors = set(itertools.chain(*colors))
n = len(colors)
m = len(colors[0])
else:
all_colors = set(colors)
n = 1
m = len(colors)
colors = [colors]
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix = np.array([color_to_value[c]
for color in colors for c in color])
shape = (n, m)
matrix = matrix.reshape(shape)
matrix = matrix[:, ind]
if axis == 0:
# row-side:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(all_colors)
return matrix, cmap
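    # Added illustrative note: e.g. colors=["r", "g", "r"] with ind=[2, 0, 1]
    # yields a 1x3 integer matrix indexing a ListedColormap built from the
    # unique colors, so the side-color strip can be drawn with ``heatmap``.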
def savefig(self, *args, **kwargs):
if 'bbox_inches' not in kwargs:
kwargs['bbox_inches'] = 'tight'
self.fig.savefig(*args, **kwargs)
def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
row_linkage, col_linkage):
# Plot the row dendrogram
if row_cluster:
self.dendrogram_row = dendrogram(
self.data2d, metric=metric, method=method, label=False, axis=0,
ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
else:
self.ax_row_dendrogram.set_xticks([])
self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
if col_cluster:
self.dendrogram_col = dendrogram(
self.data2d, metric=metric, method=method, label=False,
axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
else:
self.ax_col_dendrogram.set_xticks([])
self.ax_col_dendrogram.set_yticks([])
despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
def plot_colors(self, xind, yind, **kws):
"""Plots color labels between the dendrogram and the heatmap
Parameters
----------
heatmap_kws : dict
Keyword arguments heatmap
"""
# Remove any custom colormap and centering
kws = kws.copy()
kws.pop('cmap', None)
kws.pop('center', None)
kws.pop('vmin', None)
kws.pop('vmax', None)
kws.pop('xticklabels', None)
kws.pop('yticklabels', None)
if self.row_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.row_colors, yind, axis=0)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(self.ax_row_colors, left=True, bottom=True)
if self.col_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.col_colors, xind, axis=1)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(self.ax_col_colors, left=True, bottom=True)
def plot_matrix(self, colorbar_kws, xind, yind, **kws):
self.data2d = self.data2d.iloc[yind, xind]
self.mask = self.mask.iloc[yind, xind]
# Try to reorganize specified tick labels, if provided
xtl = kws.pop("xticklabels", True)
try:
xtl = np.asarray(xtl)[xind]
except (TypeError, IndexError):
pass
ytl = kws.pop("yticklabels", True)
try:
ytl = np.asarray(ytl)[yind]
except (TypeError, IndexError):
pass
heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
cbar_kws=colorbar_kws, mask=self.mask,
xticklabels=xtl, yticklabels=ytl, **kws)
self.ax_heatmap.yaxis.set_ticks_position('right')
self.ax_heatmap.yaxis.set_label_position('right')
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, **kws):
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage)
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.plot_colors(xind, yind, **kws)
self.plot_matrix(colorbar_kws, xind, yind, **kws)
return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
row_colors=None, col_colors=None, mask=None, **kwargs):
"""Plot a hierarchically clustered heatmap of a pandas DataFrame
Parameters
----------
data: pandas.DataFrame
Rectangular data for clustering. Cannot contain NAs.
pivot_kws : dict, optional
If `data` is a tidy dataframe, can provide keyword arguments for
pivot to create a rectangular dataframe.
method : str, optional
Linkage method to use for calculating clusters.
See scipy.cluster.hierarchy.linkage documentation for more information:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
metric : str, optional
Distance metric to use for the data. See
scipy.spatial.distance.pdist documentation for more options
http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
z_score : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
for the rows or the columns. Z scores are: z = (x - mean)/std, so
values in each row (column) will get the mean of the row (column)
subtracted, then divided by the standard deviation of the row (column).
This ensures that each row (column) has mean of 0 and variance of 1.
standard_scale : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to standardize that
dimension, meaning for each row or column, subtract the minimum and
divide each by its maximum.
figsize: tuple of two ints, optional
Size of the figure to create.
cbar_kws : dict, optional
Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to
add a label to the colorbar.
{row,col}_cluster : bool, optional
If True, cluster the {rows, columns}.
{row,col}_linkage : numpy.array, optional
Precomputed linkage matrix for the rows or columns. See
scipy.cluster.hierarchy.linkage for specific formats.
{row,col}_colors : list-like, optional
List of colors to label for either the rows or columns. Useful to
evaluate whether samples within a group are clustered together. Can
use nested lists for multiple color levels of labeling.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked. Only used for
visualizing, not for calculating.
kwargs : other keyword arguments
All other keyword arguments are passed to ``sns.heatmap``
Returns
-------
clustergrid : ClusterGrid
A ClusterGrid instance.
Notes
-----
The returned object has a ``savefig`` method that should be used if you
want to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
``clustergrid.dendrogram_row.reordered_ind``
Column indices, use:
``clustergrid.dendrogram_col.reordered_ind``
Examples
--------
Plot a clustered heatmap:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> g = sns.clustermap(flights)
Don't cluster one of the axes:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, col_cluster=False)
Use a different colormap and add lines to separate the cells:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
>>> g = sns.clustermap(flights, cmap=cmap, linewidths=.5)
Use a different figure size:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, cmap=cmap, figsize=(7, 5))
Standardize the data across the columns:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, standard_scale=1)
Normalize the data across the rows:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, z_score=0)
Use a different clustering method:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, method="single", metric="cosine")
Add colored labels on one of the axes:
.. plot::
:context: close-figs
>>> season_colors = (sns.color_palette("BuPu", 3) +
... sns.color_palette("RdPu", 3) +
... sns.color_palette("YlGn", 3) +
... sns.color_palette("OrRd", 3))
>>> g = sns.clustermap(flights, row_colors=season_colors)
"""
plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
row_colors=row_colors, col_colors=col_colors,
z_score=z_score, standard_scale=standard_scale,
mask=mask)
return plotter.plot(metric=metric, method=method,
colorbar_kws=cbar_kws,
row_cluster=row_cluster, col_cluster=col_cluster,
row_linkage=row_linkage, col_linkage=col_linkage,
**kwargs)
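# --- Hypothetical usage sketch (not part of the original file) --------------
# Per the Notes above, save through the grid's own ``savefig`` so the
# dendrograms are not clipped; the random data and filename are assumptions.
def _clustermap_savefig_example():
    data = pd.DataFrame(np.random.RandomState(1).rand(10, 12))
    g = clustermap(data, standard_scale=1)
    g.savefig("clustermap_example.png")  # bbox_inches="tight" is added by default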
|
bsd-3-clause
| 578,151,578,053,082,400 | 6,606,625,175,576,224,000 | 34.931459 | 96 | 0.58606 | false |
lepinkainen/pyfibot
|
pyfibot/modules/module_geoip.py
|
1
|
1389
|
from __future__ import unicode_literals, print_function, division
import pygeoip
import os.path
import sys
import socket
try:
from modules.module_usertrack import get_table
user_track_available = True
except ImportError:
user_track_available = False
# http://dev.maxmind.com/geoip/legacy/geolite/
DATAFILE = os.path.join(sys.path[0], "GeoIP.dat")
# STANDARD = reload from disk
# MEMORY_CACHE = load to memory
# MMAP_CACHE = memory using mmap
gi4 = pygeoip.GeoIP(DATAFILE, pygeoip.MEMORY_CACHE)
def command_geoip(bot, user, channel, args):
"""Determine the user's country based on host or nick, if module_usertrack is used."""
if not args:
return bot.say(channel, "usage: .geoip HOST/NICK")
host = args
nick = None
if user_track_available:
table = get_table(bot, channel)
user = table.find_one(nick=args)
if user:
nick = user["nick"]
host = user["host"]
try:
country = gi4.country_name_by_name(host)
except socket.gaierror:
country = None
if country:
if nick:
return bot.say(channel, "%s (%s) is in %s" % (nick, host, country))
return bot.say(channel, "%s is in %s" % (host, country))
if nick:
return bot.say(channel, "Host not found for %s (%s)" % (nick, host))
return bot.say(channel, "Host not found for %s" % host)
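# --- Hypothetical usage sketch (not part of the original module) ------------
# Exercises command_geoip with a minimal stand-in for the bot interface; the
# nick, channel and host below are illustrative assumptions.
def _geoip_example():
    class _FakeBot(object):
        def say(self, channel, message):
            print("%s: %s" % (channel, message))
    command_geoip(_FakeBot(), "tester!tester@example", "#test", "example.org")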
|
bsd-3-clause
| -3,932,881,246,026,836,000 | 2,647,795,150,348,748,300 | 26.78 | 90 | 0.636429 | false |
jspargo/AneMo
|
django/lib/python2.7/site-packages/django/core/context_processors.py
|
80
|
2274
|
"""
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.functional import lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return smart_text(token)
_get_val = lazy(_get_val, six.text_type)
return {'csrf_token': _get_val()}
def debug(request):
"Returns context variables helpful for debugging."
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connection
context_extras['sql_queries'] = connection.queries
return context_extras
def i18n(request):
from django.utils import translation
context_extras = {}
context_extras['LANGUAGES'] = settings.LANGUAGES
context_extras['LANGUAGE_CODE'] = translation.get_language()
context_extras['LANGUAGE_BIDI'] = translation.get_language_bidi()
return context_extras
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
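# --- Hypothetical sketch (not part of Django) --------------------------------
# A project-level processor written in the same style as those above; it would
# be referenced from TEMPLATE_CONTEXT_PROCESSORS. The SITE_NAME setting is an
# assumption made for illustration.
def site_name_example(request):
    return {'SITE_NAME': getattr(settings, 'SITE_NAME', 'example.com')}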
|
gpl-2.0
| 8,405,584,497,348,495,000 | -581,959,219,965,663,400 | 27.074074 | 83 | 0.692612 | false |
leighpauls/k2cro4
|
tools/gyp/pylib/gyp/MSVSVersion.py
|
122
|
13527
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
if (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# Use the 64-on-64 compiler if we can.
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
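# Added illustrative note: for example,
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\10.0', 'InstallDir')
# returns the InstallDir string on success, or None if the key/value is absent.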
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-11 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005', '9.0': '2008', '10.0': '2010', '11.0': '2012'}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, 'vcexpress.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif os.path.exists(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
}
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
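# --- Hypothetical usage sketch (not part of gyp) ------------------------------
# Selects a version and builds the environment-setup command for a 64-bit
# target; the output depends entirely on the local Visual Studio installation.
def _msvs_version_example():
  version = SelectVisualStudioVersion('auto')
  print version.Description()
  print version.SetupScript('x64')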
|
bsd-3-clause
| -8,729,348,740,987,946,000 | -1,247,328,541,312,949,000 | 35.959016 | 79 | 0.570489 | false |
fanquake/bitcoin
|
test/functional/feature_maxuploadtarget.py
|
35
|
6653
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-maxuploadtarget=800",
"-acceptnonstdtxn=1",
"-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
]]
self.supports_cli = False
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
# p2p_conns[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
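        # Added note (approximate arithmetic): 800 MiB/day minus the
        # 144 * 4MB buffer reserved for recent blocks leaves roughly 250 MiB,
        # so with ~1 MB large blocks success_count works out to about 235.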
for i in range(success_count):
p2p_conns[0].send_and_ping(getdata_request)
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for _ in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(800):
p2p_conns[1].send_and_ping(getdata_request)
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_and_ping(getdata_request)
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
self.restart_node(0, ["[email protected]", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(20):
peer.send_and_ping(getdata_request)
assert_equal(peer.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
peer.send_and_ping(getdata_request)
self.log.info("Peer still connected after trying to download old block (download permission)")
peer_info = self.nodes[0].getpeerinfo()
assert_equal(len(peer_info), 1) # node is still connected
assert_equal(peer_info[0]['permissions'], ['download'])
if __name__ == '__main__':
MaxUploadTest().main()
|
mit
| -3,548,634,096,314,418,000 | 393,331,226,839,006,100 | 39.078313 | 102 | 0.650383 | false |
llou/panopticon
|
panopticon/core/database.py
|
1
|
7145
|
# database.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from paramiko import RSAKey as pRSAKey, DSSKey
from sqlalchemy import create_engine, Column, DateTime, String, Integer, Text, Boolean
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.sql import not_
from sqlalchemy.schema import ForeignKey
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.declarative import declarative_base
from panopticon.core.util.database import key_value_property
Base = declarative_base()
class Value(Base):
__tablename__ = "values"
id = Column(Integer(), primary_key=True)
name = Column(String(1000))
value = Column(String(1000), nullable=True)
parent_id = Column(Integer, ForeignKey("values.id"), nullable=True)
values = relationship("Value", backref=backref('parent', remote_side=[id],
cascade="all"))
type = Column(String(20))
def __init__(self, name, _type, value="", parent_id=None):
self.name = name
self.type = _type
self.value = value
self.parent_id = parent_id
@property
def root(self):
        return self.id == self.parent_id
class Service(Base):
__tablename__ = "services"
name = Column(String(50), primary_key=True)
class Computer(Base):
__tablename__ = "computers"
__table_args__ = {'sqlite_autoincrement':True}
name = Column(String(255), primary_key=True)
key_name = Column(String(100), ForeignKey('keys.name', onupdate="CASCADE"))
active = Column(Boolean(), default=True)
key = relationship("Key", backref=backref('computers'))
logs = relationship("Log", backref="computer", order_by="Log.time")
def __init__(self, name, key_name="", active=True):
self.name = name
self.active = active
self.key_name = key_name
class Log(Base):
__tablename__ = "logs"
id = Column('id', Integer, primary_key=True)
time = Column(DateTime())
level = Column(String(10))
message = Column(Text())
computer_name = Column(String(255), ForeignKey('computers.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
service_name = Column(String(255), ForeignKey('services.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
role_name = Column(String(255), index=True)
action_name = Column(String(255), index=True)
def __init__(self, time, level, message, computer_name="",
service_name="", role_name="", action_name=""):
self.time = time
self.level = level
self.message = message
        self.computer_name = computer_name
        self.service_name = service_name
        self.role_name = role_name
        self.action_name = action_name
class FileTrack(Base):
__tablename__ = "filetracks"
uid = Column("uid", String(32), primary_key=True)
_computer_name = Column("computer_name", String(255),ForeignKey('computers.name'))
_path = Column("path", Text())
modification_time = Column("modification_time", DateTime())
md5 = Column("md5", String(32))
def __init__(self, computer_name, path, modification_time, md5=""):
self.computer_name = computer_name
self.path = path
self.modification_time = modification_time
self.md5 = md5
self.update_uid()
@property
def computer_name(self):
return self._computer_name
@computer_name.setter
def computer_name(self, value):
self._computer_name = value
self.update_uid()
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
self.update_uid()
def update_uid(self):
if self.computer_name and self.path:
self.uid = "%s:%s" % (self.computer_name, self.path)
else:
self.uid = ""
class Key(Base):
__tablename__ = "keys"
name = Column(String(100), primary_key=True)
algorithm = Column(String(20))
v1 = Column(String(2048))
v2 = Column(String(2048))
v3 = Column(String(2048))
v4 = Column(String(2048))
key_class = None
key_vals = []
__mapper_args__ = {'polymorphic_on' : algorithm}
@classmethod
def build_from_paramiko_key(cls, name, p_key):
if isinstance(p_key, pRSAKey):
return RSAKey(name, p_key.e, p_key.n)
elif isinstance(p_key, DSSKey):
return DSAKey(name, p_key.p, p_key.q, p_key.g, p_key.y)
else:
raise Exception("Not valid key")
def __init__(self, name, algorithm, v1, v2, v3, v4):
self.name = name
self.algorithm = algorithm
self.v1 = v1
self.v2 = v2
self.v3 = v3
self.v4 = v4
def get_paramiko_key(self):
vals = [ getattr(self, x) for x in self.key_vals ]
return self.key_class(vals=vals)
class RSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'rsa'}
key_class = pRSAKey
key_vals = [ 'e', 'n' ]
def __init__(self, name, e, n):
self.name = name
self.algorithm = "rsa"
self.e = e
self.n = n
e = key_value_property("v1")
n = key_value_property("v2")
class DSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'dsa'}
key_class = DSSKey
key_vals = [ 'p', 'q', 'g', 'y' ]
def __init__(self, name, p, q, g, y):
self.name = name
self.algorithm = "dsa"
self.p = p
self.q = q
self.g = g
self.y = y
p = key_value_property("v1")
q = key_value_property("v2")
g = key_value_property("v3")
y = key_value_property("v4")
class PanopticonDB(object):
def __init__(self, panopticon, engine=None):
self.panopticon = panopticon
self.engine = engine if engine is not None else create_engine(panopticon.db_url, poolclass=NullPool)
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
self.sync()
@contextmanager
def get_session(self):
session = self.Session()
yield session
session.commit()
session.close()
def purge(self,sure=False):
if sure:
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
def sync(self):
computer_names = [ x[0] for x in self.panopticon.computers ]
with self.get_session() as session:
session.execute(Computer.__table__.update().where(Computer.name.in_(computer_names)).values(active=True))
            # Computers that are no longer configured are marked inactive
            session.execute(Computer.__table__.update().where(not_(Computer.name.in_(computer_names))).values(active=False))
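# --- Hypothetical usage sketch (not part of the original module) ------------
# Stores a log row through the session context manager defined above; the
# database handle and computer name are illustrative assumptions.
def _log_example(db, message):
    from datetime import datetime
    with db.get_session() as session:
        session.add(Log(datetime.utcnow(), "INFO", message,
                        computer_name="host-1"))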
|
gpl-3.0
| 4,736,598,755,265,193,000 | 3,729,390,322,827,283,000 | 31.775229 | 123 | 0.626312 | false |
i-namekawa/TopSideMonitor
|
plotting.py
|
1
|
37323
|
import os, sys, time
from glob import glob
import cv2
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
matplotlib.rcParams['figure.facecolor'] = 'w'
from scipy.signal import argrelextrema
import scipy.stats as stats
import scipy.io as sio
from scipy import signal
from xlwt import Workbook
# specify these in mm to match your behavior chamber.
CHMAMBER_LENGTH=235
WATER_HIGHT=40
# quick plot should also show xy_within and location_one_third etc
# summary PDF: handle exception when a pickle file missing some fish in other pickle file
## these three taken from http://stackoverflow.com/a/18420730/566035
def strided_sliding_std_dev(data, radius=5):
windowed = rolling_window(data, (2*radius, 2*radius))
shape = windowed.shape
windowed = windowed.reshape(shape[0], shape[1], -1)
return windowed.std(axis=-1)
def rolling_window(a, window):
"""Takes a numpy array *a* and a sequence of (or single) *window* lengths
and returns a view of *a* that represents a moving window."""
if not hasattr(window, '__iter__'):
return rolling_window_lastaxis(a, window)
for i, win in enumerate(window):
if win > 1:
a = a.swapaxes(i, -1)
a = rolling_window_lastaxis(a, win)
a = a.swapaxes(-2, i)
return a
def rolling_window_lastaxis(a, window):
"""Directly taken from Erik Rigtorp's post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>"""
if window < 1:
raise ValueError, "`window` must be at least 1."
if window > a.shape[-1]:
raise ValueError, "`window` is too long."
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
## stealing ends here... //
def filterheadxy(headx,heady,thrs_denom=10):
b, a = signal.butter(8, 0.125)
dhy = np.abs(np.hstack((0, np.diff(heady,1))))
thrs = np.nanstd(dhy)/thrs_denom
ind2remove = dhy>thrs
headx[ind2remove] = np.nan
heady[ind2remove] = np.nan
headx = interp_nan(headx)
heady = interp_nan(heady)
headx = signal.filtfilt(b, a, headx, padlen=150)
heady = signal.filtfilt(b, a, heady, padlen=150)
return headx,heady
def smoothRad(theta, thrs=np.pi/4*3):
jumps = (np.diff(theta) > thrs).nonzero()[0]
print 'jumps.size', jumps.size
while jumps.size:
# print '%d/%d' % (jumps[0], theta.size)
theta[jumps+1] -= np.pi
jumps = (np.diff(theta) > thrs).nonzero()[0]
return theta
def datadct2array(data, key1, key2):
# put these in a MATLAB CELL
trialN = len(data[key1][key2])
matchedUSnameP = np.zeros((trialN,), dtype=np.object)
fnameP = np.zeros((trialN,), dtype=np.object)
# others to append to a list
eventsP = []
speed3DP = []
movingSTDP = []
d2inflowP = []
xP, yP, zP = [], [], []
XP, YP, ZP = [], [], []
ringpixelsP = []
peaks_withinP = []
swimdir_withinP = []
xy_withinP = []
location_one_thirdP = []
dtheta_shapeP = []
dtheta_velP = []
turns_shapeP = []
turns_velP = []
for n, dct in enumerate(data[key1][key2]):
# MATLAB CELL
matchedUSnameP[n] = dct['matchedUSname']
fnameP[n] = dct['fname']
# 2D array
eventsP.append([ele if type(ele) is not list else ele[0] for ele in dct['events']])
speed3DP.append(dct['speed3D'])
movingSTDP.append(dct['movingSTD'])
d2inflowP.append(dct['d2inflow'])
xP.append(dct['x'])
yP.append(dct['y'])
zP.append(dct['z'])
XP.append(dct['X'])
YP.append(dct['Y'])
ZP.append(dct['Z'])
ringpixelsP.append(dct['ringpixels'])
peaks_withinP.append(dct['peaks_within'])
swimdir_withinP.append(dct['swimdir_within'])
xy_withinP.append(dct['xy_within'])
location_one_thirdP.append(dct['location_one_third'])
dtheta_shapeP.append(dct['dtheta_shape'])
dtheta_velP.append(dct['dtheta_vel'])
turns_shapeP.append(dct['turns_shape'])
turns_velP.append(dct['turns_vel'])
TVroi = np.array(dct['TVroi'])
SVroi = np.array(dct['SVroi'])
return matchedUSnameP, fnameP, np.array(eventsP), np.array(speed3DP), np.array(d2inflowP), \
np.array(xP), np.array(yP), np.array(zP), np.array(XP), np.array(YP), np.array(ZP), \
np.array(ringpixelsP), np.array(peaks_withinP), np.array(swimdir_withinP), \
np.array(xy_withinP), np.array(dtheta_shapeP), np.array(dtheta_velP), \
np.array(turns_shapeP), np.array(turns_velP), TVroi, SVroi
def pickle2mat(fp, data=None):
# fp : full path to pickle file
# data : option to provide data to skip np.load(fp)
if not data:
data = np.load(fp)
for key1 in data.keys():
for key2 in data[key1].keys():
matchedUSname, fname, events, speed3D, d2inflow, x, y, z, X, Y, Z, \
ringpixels, peaks_within, swimdir_within, xy_within, dtheta_shape, dtheta_vel, \
turns_shape, turns_vel, TVroi, SVroi = datadct2array(data, key1, key2)
datadict = {
'matchedUSname' : matchedUSname,
'fname' : fname,
'events' : events,
'speed3D' : speed3D,
'd2inflow' : d2inflow,
'x' : x,
'y' : y,
'z' : z,
'X' : X,
'Y' : Y,
'Z' : Z,
'ringpixels' : ringpixels,
'peaks_within' : peaks_within,
'swimdir_within' : swimdir_within,
'xy_within' : xy_within,
'dtheta_shape' : dtheta_shape,
'dtheta_vel' : dtheta_vel,
'turns_shape' : turns_shape,
'turns_vel' : turns_vel,
'TVroi' : TVroi,
'SVroi' : SVroi,
}
outfp = '%s_%s_%s.mat' % (fp[:-7],key1,key2)
sio.savemat(outfp, datadict, oned_as='row', do_compression=True)
def interp_nan(x):
'''
Replace nan by interporation
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
'''
ok = -np.isnan(x)
if (ok == False).all():
return x
else:
xp = ok.ravel().nonzero()[0]
fp = x[ok]
_x = np.isnan(x).ravel().nonzero()[0]
x[-ok] = np.interp(_x, xp, fp)
return x
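# Added illustrative note: interior NaNs are replaced by linear interpolation,
# e.g. interp_nan(np.array([1.0, np.nan, 3.0])) returns array([1., 2., 3.]).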
def polytest(x,y,rx,ry,rw,rh,rang):
points=cv2.ellipse2Poly(
(rx,ry),
axes=(rw/2,rh/2),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
return cv2.pointPolygonTest(np.array(points), (x,y), measureDist=1)
def depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3):
z0 = z - SVy1
x0 = x - TVx1
mid = (SVy2-SVy1)/2
adj = (z0 - mid) / (SVy2-SVy1) * (SVy2-SVy3) * (1-(x0)/float(TVx2-TVx1))
return z0 + adj + SVy1 # back to abs coord
def putNp2xls(array, ws):
for r, row in enumerate(array):
for c, val in enumerate(row):
ws.write(r, c, val)
def drawLines(mi, ma, events, fps=30.0):
CS, USs, preRange = events
plot([CS-preRange, CS-preRange], [mi,ma], '--c') # 2 min prior odor
plot([CS , CS ], [mi,ma], '--g', linewidth=2) # CS onset
if USs:
if len(USs) > 3:
colors = 'r' * len(USs)
else:
colors = [_ for _ in ['r','b','c'][:len(USs)]]
for c,us in zip(colors, USs):
plot([us, us],[mi,ma], linestyle='--', color=c, linewidth=2) # US onset
plot([USs[0]+preRange/2,USs[0]+preRange/2], [mi,ma], linestyle='--', color=c, linewidth=2) # end of US window
xtck = np.arange(0, max(CS+preRange, max(USs)), 0.5*60*fps) # every 0.5 min tick
else:
xtck = np.arange(0, CS+preRange, 0.5*60*fps) # every 0.5 min tick
xticks(xtck, xtck/fps/60)
gca().xaxis.set_minor_locator(MultipleLocator(5*fps)) # 5 s minor ticks
def approachevents(x,y,z, ringpolyTVArray, ringpolySVArray, fishlength=134, thrs=None):
'''
fishlength: some old scrits may call this with fishlength
thrs: multitrack GUI provides this by ringAppearochLevel spin control.
can be an numpy array (to track water level change etc)
'''
smoothedz = np.convolve(np.hanning(10)/np.hanning(10).sum(), z, 'same')
peaks = argrelextrema(smoothedz, np.less)[0] # less because 0 is top in image.
# now filter peaks by height.
ringLevel = ringpolySVArray[:,1]
if thrs is None:
thrs = ringLevel+fishlength/2
if type(thrs) == int: # can be numpy array or int
thrs = ringLevel.mean() + thrs
peaks = peaks[ z[peaks] < thrs ]
else: # numpy array should be ready to use
peaks = peaks[ z[peaks] < thrs[peaks] ]
# now filter out by TVringCenter
peaks_within = get_withinring(ringpolyTVArray, peaks, x, y)
return smoothedz, peaks_within
def get_withinring(ringpolyTVArray, timepoints, x, y):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
# poly test
peaks_within = []
for p in timepoints:
points=cv2.ellipse2Poly(
(rx[p],ry[p]),
axes=(rw[p]/2,rh[p]/2),
angle=rang[p],
arcStart=0,
arcEnd=360,
delta=3
)
inout = cv2.pointPolygonTest(np.array(points), (x[p],y[p]), measureDist=1)
if inout > 0:
peaks_within.append(p)
return peaks_within
def location_ring(x,y,ringpolyTVArray):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
d2ringcenter = np.sqrt((x-rx)**2 + (y-ry)**2)
# filter by radius 20% buffer in case the ring moves around
indices = (d2ringcenter < 1.2*max(rw.max(), rh.max())).nonzero()[0]
xy_within = get_withinring(ringpolyTVArray, indices, x, y)
return xy_within
def swimdir_analysis(x,y,z,ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps=30.0):
# smoothing
# z = np.convolve(np.hanning(16)/np.hanning(16).sum(), z, 'same')
    # The two cameras have different zoom settings, so the distance per pixel differs.
    # For swim direction, however, it does not matter how much x,y are compressed relative to z.
# ring z level from SV
rz = ringpolySVArray[:,1].astype(np.int)
# ring all other params from TV
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
speed3D = np.hstack(([0], speed3D))
# line in 3D http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
# x-x0 y-y0 z-z0
# ---- = ---- = ----
# a b c
# solve them for z = rz. x0,y0,z0 are tvx, tvy, svy
# x = (a * (rz-z)) / c + x0
dt = 3 # define slope as diff between current and dt frame before
a = np.hstack( (np.ones(dt), x[dt:]-x[:-dt]) )
b = np.hstack( (np.ones(dt), y[dt:]-y[:-dt]) )
c = np.hstack( (np.ones(dt), z[dt:]-z[:-dt]) )
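    # the first dt entries are padded with ones only so that a, b, c keep the same length as x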
c[c==0] = np.nan # avoid zero division
water_x = (a * (rz-z) / c) + x
water_y = (b * (rz-z) / c) + y
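    # (water_x, water_y) is where the line along the fish's recent displacement
    # vector crosses the ring's z level (rz), i.e. its projected surface entry point.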
    upwards = c < -2/30.0*fps # not accurate when c is small or negative
xok = (TVx1 < water_x) & (water_x < TVx2)
yok = (TVy1 < water_y) & (water_y < TVy2)
filtered = upwards & xok & yok# & -np.isinf(water_x) & -np.isinf(water_y)
    water_x[~filtered] = np.nan
    water_y[~filtered] = np.nan
# figure()
# ax = subplot(111)
# ax.imshow(npData['TVbg'], cmap=cm.gray) # clip out from TVx1,TVy1
# ax.plot(x-TVx1, y-TVy1, 'c')
# ax.plot(water_x-TVx1, water_y-TVy1, 'r.')
# xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
# draw(); show()
SwimDir = []
for n in filtered.nonzero()[0]:
inout = polytest(water_x[n],water_y[n],rx[n],ry[n],rw[n],rh[n],rang[n])
SwimDir.append((n, inout, speed3D[n])) # inout>0 are inside
return SwimDir, water_x, water_y
def plot_eachTr(events, x, y, z, inflowpos, ringpixels, peaks_within, swimdir_within=None,
pp=None, _title=None, fps=30.0, inmm=False):
CS, USs, preRange = events
    # preRange = 3600 frames: 2 min prior and 1 min after CS. +900 frames for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(511) # Swimming speed
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
drawLines(np.nanmin(speed3D), np.nanmax(speed3D), events, fps) # go behind
plot(speed3D)
movingSTD = np.append( np.zeros(fps*10), strided_sliding_std_dev(speed3D, fps*10) )
plot(movingSTD, linewidth=2)
plot(np.ones_like(speed3D) * speed3D.std()*6, '-.', color='gray')
ylim([-5, speed3D[xmin:xmax].max()])
xlim([xmin,xmax]); title(_title)
if inmm:
ylabel('Speed 3D (mm),\n6SD thr');
else:
ylabel('Speed 3D, 6SD thr');
ax = subplot(512) # z level
drawLines(z.min(), z.max(), events)
plot(z, 'b')
pkx = peaks_within.nonzero()[0]
if inmm:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].max()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].max()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('Z (mm)')
else:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].min()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].min()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
ax.invert_yaxis(); xlim([xmin,xmax]); ylabel('z')
subplot(513) # x
drawLines(x.min(), x.max(), events)
plot(x, 'b')
plot(y, 'g')
xlim([xmin,xmax]); ylabel('x,y')
subplot(514) # Distance to the inflow tube
xin, yin, zin = inflowpos
d2inflow = np.sqrt((x-xin) ** 2 + (y-yin) ** 2 + (z-zin) ** 2 )
drawLines(d2inflow.min(), d2inflow.max(), events)
plot(d2inflow)
ylim([d2inflow[xmin:xmax].min(), d2inflow[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('distance to\ninflow tube')
subplot(515) # ringpixels: it seems i never considered TV x,y for this
rpmax, rpmin = np.nanmax(ringpixels[xmin:xmax]), np.nanmin(ringpixels[xmin:xmax])
drawLines(rpmin, rpmax, events)
plot(ringpixels)
plot(pkx, peaks_within[pkx]*rpmax*1.06, 'mo')
if swimdir_within is not None:
plot(___x, swimdir_within[___x]*rpmax*1.15, 'g+')
ylim([-100, rpmax*1.2])
xlim([xmin,xmax]); ylabel('ringpixels')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
rng = np.arange(CS-preRange, CS+preRange, dtype=np.int)
return speed3D[rng], movingSTD[rng], d2inflow[rng], ringpixels[rng]
def plot_turnrates(events, dthetasum_shape,dthetasum_vel,turns_shape,turns_vel,
pp=None, _title=None, thrs=np.pi/4*(133.33333333333334/120), fps=30.0):
CS, USs, preRange = events
    # preRange = 3600 frames: 2 min prior and 1 min after CS. +900 frames for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(211)
drawLines(dthetasum_shape.min(), dthetasum_shape.max(), events)
plot(np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(dthetasum_shape)
dmax = dthetasum_shape[xmin:xmax].max()
plot(turns_shape, (0.5+dmax)*np.ones_like(turns_shape), 'o')
temp = np.zeros_like(dthetasum_shape)
temp[turns_shape] = 1
shape_cumsum = np.cumsum(temp)
shape_cumsum -= shape_cumsum[xmin]
plot( shape_cumsum / shape_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
xlim([xmin,xmax]); ylabel('Shape based'); title('Orientation change per 4 frames: ' + _title)
ylim([dthetasum_shape[xmin:xmax].min()-1, dmax+1])
subplot(212)
drawLines(dthetasum_vel.min(), dthetasum_vel.max(), events)
plot(np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(dthetasum_vel)
dmax = dthetasum_vel[xmin:xmax].max()
plot(turns_vel, (0.5+dmax)*np.ones_like(turns_vel), 'o')
temp = np.zeros_like(dthetasum_vel)
temp[turns_vel] = 1
vel_cumsum = np.cumsum(temp)
vel_cumsum -= vel_cumsum[xmin]
plot( vel_cumsum / vel_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
ylim([dthetasum_vel[xmin:xmax].min()-1, dmax+1])
xlim([xmin,xmax]); ylabel('Velocity based')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
def trajectory(x, y, z, rng, ax, _xlim=[0,640], _ylim=[480,480+300], _zlim=[150,340],
color='b', fps=30.0, ringpolygon=None):
ax.plot(x[rng],y[rng],z[rng], color=color)
ax.view_init(azim=-75, elev=-180+15)
if ringpolygon:
rx, ry, rz = ringpolygon
ax.plot(rx, ry, rz, color='gray')
ax.set_xlim(_xlim[0],_xlim[1])
ax.set_ylim(_ylim[0],_ylim[1])
ax.set_zlim(_zlim[0],_zlim[1])
title(("(%2.1f min to %2.1f min)" % (rng[0]/fps/60.0,(rng[-1]+1)/60.0/fps)))
draw()
def plotTrajectory(x, y, z, events, _xlim=None, _ylim=None, _zlim=None, fps=30.0, pp=None, ringpolygon=None):
CS, USs, preRange = events
rng1 = np.arange(CS-preRange, CS-preRange/2, dtype=int)
rng2 = np.arange(CS-preRange/2, CS, dtype=int)
if USs:
rng3 = np.arange(CS, min(USs), dtype=int)
rng4 = np.arange(min(USs), min(USs)+preRange/2, dtype=int)
combined = np.hstack((rng1,rng2,rng3,rng4))
else:
combined = np.hstack((rng1,rng2))
if _xlim is None:
_xlim = map( int, ( x[combined].min(), x[combined].max() ) )
if _ylim is None:
_ylim = map( int, ( y[combined].min(), y[combined].max() ) )
if _zlim is None:
_zlim = map( int, ( z[combined].min(), z[combined].max() ) )
if ringpolygon:
_zlim[0] = min( _zlim[0], int(ringpolygon[2][0]) )
fig3D = plt.figure(figsize=(12,8), facecolor='w')
ax = fig3D.add_subplot(221, projection='3d'); trajectory(x,y,z,rng1,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
ax = fig3D.add_subplot(222, projection='3d'); trajectory(x,y,z,rng2,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
if USs:
ax = fig3D.add_subplot(223, projection='3d'); trajectory(x,y,z,rng3,ax,_xlim,_ylim,_zlim,'g',fps,ringpolygon)
ax = fig3D.add_subplot(224, projection='3d'); trajectory(x,y,z,rng4,ax,_xlim,_ylim,_zlim,'r',fps,ringpolygon)
tight_layout()
if pp:
fig3D.savefig(pp, format='pdf')
def add2DataAndPlot(fp, fish, data, createPDF):
if createPDF:
pp = PdfPages(fp[:-7]+'_'+fish+'.pdf')
else:
pp = None
params = np.load(fp)
fname = os.path.basename(fp).split('.')[0] + '.avi'
dirname = os.path.dirname(fp)
preRange = params[(fname, 'mog')]['preRange']
fps = params[(fname, 'mog')]['fps']
TVx1 = params[(fname, fish)]['TVx1']
TVy1 = params[(fname, fish)]['TVy1']
TVx2 = params[(fname, fish)]['TVx2']
TVy2 = params[(fname, fish)]['TVy2']
SVx1 = params[(fname, fish)]['SVx1']
SVx2 = params[(fname, fish)]['SVx2']
SVx3 = params[(fname, fish)]['SVx3']
SVy1 = params[(fname, fish)]['SVy1']
SVy2 = params[(fname, fish)]['SVy2']
SVy3 = params[(fname, fish)]['SVy3']
ringAppearochLevel = params[(fname, fish)]['ringAppearochLevel']
_npz = os.path.join(dirname, os.path.join('%s_%s.npz' % (fname[:-4], fish)))
# if os.path.exists(_npz):
npData = np.load(_npz)
tvx = npData['TVtracking'][:,0] # x with nan
tvy = npData['TVtracking'][:,1] # y
headx = npData['TVtracking'][:,3] # headx
heady = npData['TVtracking'][:,4] # heady
svy = npData['SVtracking'][:,1] # z
InflowTubeTVArray = npData['InflowTubeTVArray']
InflowTubeSVArray = npData['InflowTubeSVArray']
inflowpos = InflowTubeTVArray[:,0], InflowTubeTVArray[:,1], InflowTubeSVArray[:,1]
ringpixels = npData['ringpixel']
ringpolyTVArray = npData['ringpolyTVArray']
ringpolySVArray = npData['ringpolySVArray']
TVbg = npData['TVbg']
print os.path.basename(_npz), 'loaded.'
x,y,z = map(interp_nan, [tvx,tvy,svy])
# z level correction by depth (x)
z = depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3)
smoothedz, peaks_within = approachevents(x, y, z,
ringpolyTVArray, ringpolySVArray, thrs=ringAppearochLevel)
# convert to numpy array from list
temp = np.zeros_like(x)
temp[peaks_within] = 1
peaks_within = temp
# normalize to mm
longaxis = float(max((TVx2-TVx1), (TVy2-TVy1))) # before rotation H is applied they are orthogonal
waterlevel = float(SVy2-SVy1)
X = (x-TVx1) / longaxis * CHMAMBER_LENGTH
Y = (TVy2-y) / longaxis * CHMAMBER_LENGTH
Z = (SVy2-z) / waterlevel * WATER_HIGHT # bottom of chamber = 0, higher more positive
inflowpos_mm = ((inflowpos[0]-TVx1) / longaxis * CHMAMBER_LENGTH,
(TVy2-inflowpos[1]) / longaxis * CHMAMBER_LENGTH,
(SVy2-inflowpos[2]) / waterlevel * WATER_HIGHT )
# do the swim direction analysis here
swimdir, water_x, water_y = swimdir_analysis(x,y,z,
ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps)
# all of swimdir are within ROI (frame#, inout, speed) but not necessary within ring
sdir = np.array(swimdir)
withinRing = sdir[:,1]>0 # inout>0 are inside ring
temp = np.zeros_like(x)
temp[ sdir[withinRing,0].astype(int) ] = 1
swimdir_within = temp
# location_ring
xy_within = location_ring(x,y, ringpolyTVArray)
temp = np.zeros_like(x)
temp[xy_within] = 1
xy_within = temp
# location_one_third
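    # i.e. figure out which third of the long axis the inflow tube sits in, then flag
    # frames when the fish is in that third (assumes the tube sits in an end third).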
if (TVx2-TVx1) > (TVy2-TVy1):
if np.abs(np.arange(TVx1, longaxis+TVx1, longaxis/3) + longaxis/6 - inflowpos[0].mean()).argmin() == 2:
location_one_third = x-TVx1 > longaxis/3*2
else:
location_one_third = x < longaxis/3
else:
if np.abs(np.arange(TVy1, longaxis+TVy1, longaxis/3) + longaxis/6 - inflowpos[1].mean()).argmin() == 2:
location_one_third = y-TVy1 > longaxis/3*2
else:
location_one_third = y < longaxis/3
# turn rate analysis (shape based)
heady, headx = map(interp_nan, [heady, headx])
headx, heady = filterheadxy(headx, heady)
dy = heady - y
dx = headx - x
theta_shape = np.arctan2(dy, dx)
# velocity based
cx, cy = filterheadxy(x.copy(), y.copy()) # centroid x,y
vx = np.append(0, np.diff(cx))
vy = np.append(0, np.diff(cy))
theta_vel = np.arctan2(vy, vx)
# prepare ringpolygon for trajectory plot
rx, ry, rw, rh, rang = ringpolyTVArray.mean(axis=0).astype(int) # use mm ver above
rz = ringpolySVArray.mean(axis=0)[1].astype(int)
RX = (rx-TVx1) / longaxis * CHMAMBER_LENGTH
RY = (TVy2-ry) / longaxis * CHMAMBER_LENGTH
RW = rw / longaxis * CHMAMBER_LENGTH / 2
RH = rh / longaxis * CHMAMBER_LENGTH / 2
RZ = (SVy2-rz) / waterlevel * WATER_HIGHT
points = cv2.ellipse2Poly(
(RX.astype(int),RY.astype(int)),
axes=(RW.astype(int),RH.astype(int)),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
ringpolygon = [points[:,0], points[:,1], np.ones(points.shape[0]) * RZ]
eventTypeKeys = params[(fname, fish)]['EventData'].keys()
CSs = [_ for _ in eventTypeKeys if _.startswith('CS')]
USs = [_ for _ in eventTypeKeys if _.startswith('US')]
# print CSs, USs
# events
for CS in CSs:
CS_Timings = params[(fname, fish)]['EventData'][CS]
CS_Timings.sort()
# initialize when needed
if CS not in data[fish].keys():
data[fish][CS] = []
# now look around for US after it within preRange
for t in CS_Timings:
tr = len(data[fish][CS])+1
rng = np.arange(t-preRange, t+preRange, dtype=np.int)
matchedUSname = None
for us in USs:
us_Timings = params[(fname, fish)]['EventData'][us]
matched = [_ for _ in us_Timings if t-preRange < _ < t+preRange]
if matched:
events = [t, matched, preRange] # ex. CS+
matchedUSname = us
break
else:
continue
_title = '(%s, %s) trial#%02d %s (%s)' % (CS, matchedUSname[0], tr, fname, fish)
print _title, events
_speed3D, _movingSTD, _d2inflow, _ringpixels = plot_eachTr(events, X, Y, Z, inflowpos_mm,
ringpixels, peaks_within, swimdir_within, pp, _title, fps, inmm=True)
# 3d trajectory
_xlim = (0, CHMAMBER_LENGTH)
_zlim = (RZ.max(),0)
plotTrajectory(X, Y, Z, events, _xlim=_xlim, _zlim=_zlim, fps=fps, pp=pp, ringpolygon=ringpolygon)
# turn rate analysis
# shape based
theta_shape[rng] = smoothRad(theta_shape[rng].copy(), thrs=np.pi/2)
dtheta_shape = np.append(0, np.diff(theta_shape)) # full length
kernel = np.ones(4)
dthetasum_shape = np.convolve(dtheta_shape, kernel, 'same')
# 4 frames = 1000/30.0*4 = 133.3 ms
thrs = (np.pi / 2) * (133.33333333333334/120) # Braubach et al 2009 90 degree in 120 ms
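            # i.e. the 90 deg / 120 ms criterion scaled to the 133.3 ms (4-frame) window, ~1.75 rad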
peaks_shape = argrelextrema(abs(dthetasum_shape), np.greater)[0]
turns_shape = peaks_shape[ (abs(dthetasum_shape[peaks_shape]) > thrs).nonzero()[0] ]
# velocity based
theta_vel[rng] = smoothRad(theta_vel[rng].copy(), thrs=np.pi/2)
dtheta_vel = np.append(0, np.diff(theta_vel))
dthetasum_vel = np.convolve(dtheta_vel, kernel, 'same')
peaks_vel = argrelextrema(abs(dthetasum_vel), np.greater)[0]
turns_vel = peaks_vel[ (abs(dthetasum_vel[peaks_vel]) > thrs).nonzero()[0] ]
plot_turnrates(events, dthetasum_shape, dthetasum_vel, turns_shape, turns_vel, pp, _title, fps=fps)
_temp = np.zeros_like(dtheta_shape)
_temp[turns_shape] = 1
turns_shape_array = _temp
_temp = np.zeros_like(dtheta_vel)
_temp[turns_vel] = 1
turns_vel_array = _temp
# plot swim direction analysis
fig = figure(figsize=(12,8), facecolor='w')
ax1 = subplot(211)
ax1.imshow(TVbg, cmap=cm.gray) # TVbg is clip out of ROI
ax1.plot(x[rng]-TVx1, y[rng]-TVy1, 'gray')
ax1.plot(water_x[t-preRange:t]-TVx1, water_y[t-preRange:t]-TVy1, 'c.')
if matched:
ax1.plot( water_x[t:matched[0]]-TVx1,
water_y[t:matched[0]]-TVy1, 'g.')
ax1.plot( water_x[matched[0]:matched[0]+preRange/4]-TVx1,
water_y[matched[0]:matched[0]+preRange/4]-TVy1, 'r.')
xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
title(_title)
ax2 = subplot(212)
ax2.plot( swimdir_within )
ax2.plot( peaks_within*1.15-0.1, 'mo' )
if matched:
xmin, xmax = t-preRange-10*fps, matched[0]+preRange/4
else:
xmin, xmax = t-preRange-10*fps, t+preRange/2+10*fps
gzcs = np.cumsum(swimdir_within)
gzcs -= gzcs[xmin]
ax2.plot( gzcs/gzcs[xmax] )
drawLines(0,1.2, events)
ylim([0,1.2])
xlim([xmin, xmax])
ylabel('|: SwimDirection\no: approach events')
data[fish][CS].append( {
'fname' : fname,
'x': x[rng], 'y': y[rng], 'z': z[rng],
'X': X[rng], 'Y': Y[rng], 'Z': Z[rng], # calibrate space (mm)
'speed3D': _speed3D, # calibrate space (mm)
'movingSTD' : _movingSTD, # calibrate space (mm)
'd2inflow': _d2inflow, # calibrate space (mm)
'ringpixels': _ringpixels,
'peaks_within': peaks_within[rng],
'xy_within': xy_within[rng],
'location_one_third' : location_one_third[rng],
'swimdir_within' : swimdir_within[rng],
'dtheta_shape': dtheta_shape[rng],
'dtheta_vel': dtheta_vel[rng],
'turns_shape': turns_shape_array[rng], # already +/- preRange
'turns_vel': turns_vel_array[rng],
'events' : events,
'matchedUSname' : matchedUSname,
'TVroi' : (TVx1,TVy1,TVx2,TVy2),
'SVroi' : (SVx1,SVy1,SVx2,SVy2),
} )
if pp:
fig.savefig(pp, format='pdf')
close('all') # release memory ASAP!
if pp:
pp.close()
def getPDFs(pickle_files, fishnames=None, createPDF=True):
# type checking args
if type(pickle_files) is str:
pickle_files = [pickle_files]
# convert to a list or set of fish names
if type(fishnames) is str:
fishnames = [fishnames]
elif not fishnames:
fishnames = set()
# re-organize trials into a dict "data"
data = {}
# figure out trial number (sometime many trials in one files) for each fish
# go through all pickle_files and use timestamps of file to sort events.
timestamps = []
for fp in pickle_files:
# collect ctime of pickled files
fname = os.path.basename(fp).split('.')[0] + '.avi'
timestamps.append( time.strptime(fname, "%b-%d-%Y_%H_%M_%S.avi") )
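        # i.e. video basenames are expected to look like 'Aug-04-2015_12_30_00'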
# look into the pickle and collect fish analyzed
params = np.load(fp) # loading pickled file!
if type(fishnames) is set:
for fish in [fs for fl,fs in params.keys() if fl == fname and fs != 'mog']:
fishnames.add(fish)
timestamps = sorted(range(len(timestamps)), key=timestamps.__getitem__)
# For each fish, go thru all pickled files
for fish in fishnames:
data[fish] = {}
# now go thru the sorted
for ind in timestamps:
fp = pickle_files[ind]
print 'processing #%d\n%s' % (ind, fp)
add2DataAndPlot(fp, fish, data, createPDF)
return data
def plotTrials(data, fish, CSname, key, step, offset=0, pp=None):
fig = figure(figsize=(12,8), facecolor='w')
ax1 = fig.add_subplot(121) # raw trace
ax2 = fig.add_subplot(222) # learning curve
ax3 = fig.add_subplot(224) # bar plot
preP, postP, postP2 = [], [], []
longestUS = 0
for n, measurement in enumerate(data[fish][CSname]):
tr = n+1
CS, USs, preRange = measurement['events']
subplot(ax1)
mi = -step*(tr-1)
ma = mi + step
drawLines(mi, ma, (preRange, [preRange+(USs[0]-CS)], preRange))
longestUS = max([us-CS+preRange*3/2 for us in USs]+[longestUS])
        # measurement[key]: vector spanning +/- preRange around the CS timing, i.e. the CS sits at index preRange
ax1.plot(measurement[key]-step*(tr-1)+offset)
title(CSname+': '+key) # cf. preRange = 3600 frames
pre = measurement[key][:preRange].mean()+offset # 2 min window
post = measurement[key][preRange:preRange+(USs[0]-CS)].mean()+offset # 23 s window
post2 = measurement[key][preRange+(USs[0]-CS):preRange*3/2+(USs[0]-CS)].mean()+offset # 1 min window after US
preP.append(pre)
postP.append(post)
postP2.append(post2)
ax3.plot([1, 2, 3], [pre, post, post2],'o-')
ax1.set_xlim([0,longestUS])
ax1.axis('off')
subplot(ax2)
x = range(1, tr+1)
y = np.diff((preP,postP), axis=0).ravel()
ax2.plot( x, y, 'ko-', linewidth=2 )
ax2.plot( x, np.zeros_like(x), '-.', linewidth=1, color='gray' )
# grid()
slope, intercept, rvalue, pval, stderr = stats.stats.linregress(x,y)
title('slope = zero? p-value = %f' % pval)
ax2.set_xlabel("Trial#")
ax2.set_xlim([0.5,tr+0.5])
ax2.set_ylabel('CS - pre')
subplot(ax3)
ax3.bar([0.6, 1.6, 2.6], [np.nanmean(preP), np.nanmean(postP), np.nanmean(postP2)], facecolor='none')
t, pval = stats.ttest_rel(postP, preP)
title('paired t p-value = %f' % pval)
ax3.set_xticks([1,2,3])
ax3.set_xticklabels(['pre', CSname, measurement['matchedUSname']])
ax3.set_xlim([0.5,3.5])
ax3.set_ylabel('Raw mean values')
tight_layout(2, h_pad=1, w_pad=1)
if pp:
fig.savefig(pp, format='pdf')
close('all')
return np.vstack((preP, postP, postP2))
def getSummary(data, dirname=None):
for fish in data.keys():
for CSname in data[fish].keys():
if dirname:
pp = PdfPages(os.path.join(dirname, '%s_for_%s.pdf' % (CSname,fish)))
print 'generating %s_for_%s.pdf' % (CSname,fish)
book = Workbook()
sheet1 = book.add_sheet('speed3D')
avgs = plotTrials(data, fish, CSname, 'speed3D', 30, pp=pp)
putNp2xls(avgs, sheet1)
sheet2 = book.add_sheet('d2inflow')
avgs = plotTrials(data, fish, CSname, 'd2inflow', 200, pp=pp)
putNp2xls(avgs, sheet2)
# sheet3 = book.add_sheet('smoothedz')
sheet3 = book.add_sheet('Z')
# avgs = plotTrials(data, fish, CSname, 'smoothedz', 100, pp=pp)
avgs = plotTrials(data, fish, CSname, 'Z', 30, pp=pp)
putNp2xls(avgs, sheet3)
sheet4 = book.add_sheet('ringpixels')
avgs = plotTrials(data, fish, CSname, 'ringpixels', 1200, pp=pp)
putNp2xls(avgs, sheet4)
sheet5 = book.add_sheet('peaks_within')
avgs = plotTrials(data, fish, CSname, 'peaks_within', 1.5, pp=pp)
putNp2xls(avgs, sheet5)
sheet6 = book.add_sheet('swimdir_within')
avgs = plotTrials(data, fish, CSname, 'swimdir_within', 1.5, pp=pp)
putNp2xls(avgs, sheet6)
sheet7 = book.add_sheet('xy_within')
avgs = plotTrials(data, fish, CSname, 'xy_within', 1.5, pp=pp)
putNp2xls(avgs, sheet7)
sheet8 = book.add_sheet('turns_shape')
avgs = plotTrials(data, fish, CSname, 'turns_shape', 1.5, pp=pp)
putNp2xls(avgs, sheet8)
sheet9 = book.add_sheet('turns_vel')
avgs = plotTrials(data, fish, CSname, 'turns_vel', 1.5, pp=pp)
putNp2xls(avgs, sheet9)
if dirname:
pp.close()
book.save(os.path.join(dirname, '%s_for_%s.xls' % (CSname,fish)))
close('all')
else:
show()
def add2Pickles(dirname, pickle_files):
# dirname : folder to look for pickle files
# pickle_files : output, a list to be concatenated.
pattern = os.path.join(dirname, '*.pickle')
temp = [_ for _ in glob(pattern) if not _.endswith('- Copy.pickle') and
not os.path.basename(_).startswith('Summary')]
pickle_files += temp
if __name__ == '__main__':
pickle_files = []
# small test data
# add2Pickles('R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test', pickle_files)
# outputdir = 'R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test'
# show me what you got
for pf in pickle_files:
print pf
fp = os.path.join(outputdir, 'Summary.pickle')
createPDF = True # useful when plotting etc code updated
if 1: # refresh analysis
data = getPDFs(pickle_files, createPDF=createPDF)
import cPickle as pickle
with open(os.path.join(outputdir, 'Summary.pickle'), 'wb') as f:
pickle.dump(data, f)
else: # or reuse previous
data = np.load(fp)
getSummary(data, outputdir)
pickle2mat(fp, data)
|
bsd-3-clause
| 915,207,148,159,077,800 | 7,446,236,433,047,007,000 | 36.435306 | 124 | 0.567291 | false |
RapidApplicationDevelopment/tensorflow
|
tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py
|
12
|
9744
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops
class Strict1dCumsumTest(tf.test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = tf.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = tf.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = tf.constant([3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = tf.constant([3], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = tf.constant([1, 3, 6], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(tf.test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = tf.constant([], shape=[0], dtype=tf.bool)
scores = tf.constant([], shape=[0], dtype=tf.float32)
score_range = [0, 1.]
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels, scores,
score_range)
tf.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
        will be True. This holds only in expectation; the actual fraction may be
        higher or lower.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = tf.placeholder(tf.bool, shape=[num_records])
scores = tf.placeholder(tf.float32, shape=[num_records])
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels,
scores,
score_range,
nbins=nbins)
tf.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
      will be True. This holds only in expectation; the actual fraction may be
      higher or lower.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
  # also checked this against sklearn.metrics.roc_auc_score.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
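  #   e.g. desired_auc = 0.75 gives x = 0.5, so True scores are drawn from U[0.5, 1].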
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -4,243,083,067,913,548,300 | -6,747,827,289,625,823,000 | 38.290323 | 80 | 0.562295 | false |
DavidNorman/tensorflow
|
tensorflow/python/ops/weights_broadcast_ops.py
|
133
|
7197
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weight broadcasting operations.
In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. This
file includes operations for those broadcasting rules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sets
def _has_valid_dims(weights_shape, values_shape):
with ops.name_scope(
None, "has_invalid_dims", (weights_shape, values_shape)) as scope:
values_shape_2d = array_ops.expand_dims(values_shape, -1)
valid_dims = array_ops.concat(
(values_shape_2d, array_ops.ones_like(values_shape_2d)), axis=1)
weights_shape_2d = array_ops.expand_dims(weights_shape, -1)
invalid_dims = sets.set_difference(weights_shape_2d, valid_dims)
num_invalid_dims = array_ops.size(
invalid_dims.values, name="num_invalid_dims")
return math_ops.equal(0, num_invalid_dims, name=scope)
def _has_valid_nonscalar_shape(
weights_rank, weights_shape, values_rank, values_shape):
with ops.name_scope(
None, "has_valid_nonscalar_shape",
(weights_rank, weights_shape, values_rank, values_shape)) as scope:
is_same_rank = math_ops.equal(
values_rank, weights_rank, name="is_same_rank")
return control_flow_ops.cond(
is_same_rank,
lambda: _has_valid_dims(weights_shape, values_shape),
lambda: is_same_rank,
name=scope)
_ASSERT_BROADCASTABLE_ERROR_PREFIX = "weights can not be broadcast to values."
def assert_broadcastable(weights, values):
"""Asserts `weights` can be broadcast to `values`.
In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We
let weights be either scalar, or the same rank as the target values, with each
dimension either 1, or the same as the corresponding values dimension.
Args:
weights: `Tensor` of weights.
values: `Tensor` of values to which weights are applied.
Returns:
`Operation` raising `InvalidArgumentError` if `weights` has incorrect shape.
`no_op` if static checks determine `weights` has correct shape.
Raises:
ValueError: If static checks determine `weights` has incorrect shape.
"""
with ops.name_scope(None, "assert_broadcastable", (weights, values)) as scope:
with ops.name_scope(None, "weights", (weights,)) as weights_scope:
weights = ops.convert_to_tensor(weights, name=weights_scope)
weights_shape = array_ops.shape(weights, name="shape")
weights_rank = array_ops.rank(weights, name="rank")
weights_rank_static = tensor_util.constant_value(weights_rank)
with ops.name_scope(None, "values", (values,)) as values_scope:
values = ops.convert_to_tensor(values, name=values_scope)
values_shape = array_ops.shape(values, name="shape")
values_rank = array_ops.rank(values, name="rank")
values_rank_static = tensor_util.constant_value(values_rank)
# Try static checks.
if weights_rank_static is not None and values_rank_static is not None:
if weights_rank_static == 0:
return control_flow_ops.no_op(name="static_scalar_check_success")
if weights_rank_static != values_rank_static:
raise ValueError(
"%s values.rank=%s. weights.rank=%s."
" values.shape=%s. weights.shape=%s." % (
_ASSERT_BROADCASTABLE_ERROR_PREFIX, values_rank_static,
weights_rank_static, values.shape, weights.shape))
weights_shape_static = tensor_util.constant_value(weights_shape)
values_shape_static = tensor_util.constant_value(values_shape)
if weights_shape_static is not None and values_shape_static is not None:
# Sanity check, this should always be true since we checked rank above.
ndims = len(values_shape_static)
assert ndims == len(weights_shape_static)
for i in range(ndims):
if weights_shape_static[i] not in (1, values_shape_static[i]):
raise ValueError(
"%s Mismatch at dim %s. values.shape=%s weights.shape=%s." % (
_ASSERT_BROADCASTABLE_ERROR_PREFIX, i, values_shape_static,
weights_shape_static))
return control_flow_ops.no_op(name="static_dims_check_success")
# Dynamic checks.
is_scalar = math_ops.equal(0, weights_rank, name="is_scalar")
data = (
_ASSERT_BROADCASTABLE_ERROR_PREFIX,
"weights.shape=", weights.name, weights_shape,
"values.shape=", values.name, values_shape,
"is_scalar=", is_scalar,
)
is_valid_shape = control_flow_ops.cond(
is_scalar,
lambda: is_scalar,
lambda: _has_valid_nonscalar_shape( # pylint: disable=g-long-lambda
weights_rank, weights_shape, values_rank, values_shape),
name="is_valid_shape")
return control_flow_ops.Assert(is_valid_shape, data, name=scope)
def broadcast_weights(weights, values):
"""Broadcast `weights` to the same shape as `values`.
This returns a version of `weights` following the same broadcast rules as
`mul(weights, values)`, but limited to the weights shapes allowed by
`assert_broadcastable`. When computing a weighted average, use this function
to broadcast `weights` before summing them; e.g.,
`reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
Args:
weights: `Tensor` whose shape is broadcastable to `values` according to the
rules of `assert_broadcastable`.
values: `Tensor` of any shape.
Returns:
`weights` broadcast to `values` shape according to the rules of
`assert_broadcastable`.
"""
with ops.name_scope(None, "broadcast_weights", (weights, values)) as scope:
values = ops.convert_to_tensor(values, name="values")
weights = ops.convert_to_tensor(
weights, dtype=values.dtype.base_dtype, name="weights")
# Try static check for exact match.
weights_shape = weights.get_shape()
values_shape = values.get_shape()
if (weights_shape.is_fully_defined() and
values_shape.is_fully_defined() and
weights_shape.is_compatible_with(values_shape)):
return weights
with ops.control_dependencies((assert_broadcastable(weights, values),)):
return math_ops.multiply(
weights, array_ops.ones_like(values), name=scope)
|
apache-2.0
| -2,451,056,405,268,392,400 | 9,085,854,914,353,723,000 | 41.585799 | 80 | 0.682784 | false |
ecederstrand/django
|
django/db/backends/base/features.py
|
193
|
9883
|
from django.db.models.aggregates import StdDev
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures(object):
gis_enabled = False
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_release_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support timedeltas as arguments?
# This is only relevant when there is a native duration field.
# Specifically, there is a bug with cx_Oracle:
# https://bitbucket.org/anthony_tuininga/cx_oracle/issue/7/
driver_supports_timedelta_args = False
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend determine reliably the length of a CharField?
can_introspect_max_length = True
# Can the backend determine reliably if a field is nullable?
# Note that this is separate from interprets_empty_strings_as_nulls,
# although the latter feature, when true, interferes with correct
# setting (and introspection) of CharFields' nullability.
# This is True for all core backends.
can_introspect_null = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
# Can the backend introspect an BinaryField, instead of an TextField?
can_introspect_binary_field = True
# Can the backend introspect an DecimalField, instead of an FloatField?
can_introspect_decimal_field = True
# Can the backend introspect an IPAddressField, instead of an CharField?
can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Does the backend require the sqlparse library for splitting multi-line
# statements before executing them?
requires_sqlparse_for_splitting = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
uppercases_column_names = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries unless
# every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt-in.
can_clone_databases = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.set_autocommit(False)
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
return count == 0
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions."""
try:
self.connection.ops.check_expression_support(StdDev(1))
return True
except NotImplementedError:
return False
def introspected_boolean_field_type(self, field=None, created_separately=False):
"""
What is the type returned when the backend introspects a BooleanField?
The optional arguments may be used to give further details of the field to be
introspected; in particular, they are provided by Django's test suite:
field -- the field definition
created_separately -- True if the field was added via a SchemaEditor's AddField,
False if the field was created with the model
Note that return value from this function is compared by tests against actual
introspection results; it should provide expectations, not run an introspection
itself.
"""
if self.can_introspect_null and field and field.null:
return 'NullBooleanField'
return 'BooleanField'
|
bsd-3-clause
| -8,711,101,720,647,205,000 | -3,117,214,580,085,003,300 | 37.011538 | 88 | 0.7094 | false |
kubeup/archon
|
vendor/github.com/influxdata/influxdb/build.py
|
21
|
41033
|
#!/usr/bin/python2.7 -u
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### InfluxDB Variables
################
# Packaging variables
PACKAGE_NAME = "influxdb"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/influxdb"
DATA_DIR = "/var/lib/influxdb"
SCRIPT_DIR = "/usr/lib/influxdb/scripts"
CONFIG_DIR = "/etc/influxdb"
LOGROTATE_DIR = "/etc/logrotate.d"
MAN_DIR = "/usr/share/man"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/influxdb.service"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTINST_SCRIPT = "scripts/post-install.sh"
POSTUNINST_SCRIPT = "scripts/post-uninstall.sh"
LOGROTATE_SCRIPT = "scripts/logrotate"
DEFAULT_CONFIG = "etc/config.sample.toml"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts"
CONFIGURATION_FILES = [
CONFIG_DIR + '/influxdb.conf',
LOGROTATE_DIR + '/influxdb',
]
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/influxdb"
MAINTAINER = "[email protected]"
VENDOR = "InfluxData"
DESCRIPTION = "Distributed time-series database."
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet ./"
optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--license {} \
--maintainer {} \
--directories {} \
--directories {} \
--directories {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
POSTINST_SCRIPT,
PREINST_SCRIPT,
POSTUNINST_SCRIPT,
PACKAGE_LICENSE,
MAINTAINER,
LOG_DIR,
DATA_DIR,
MAN_DIR,
DESCRIPTION)
for f in CONFIGURATION_FILES:
fpm_common_args += " --config-files {}".format(f)
targets = {
'influx' : './cmd/influx',
'influxd' : './cmd/influxd',
'influx_stress' : './cmd/influx_stress',
'influx_inspect' : './cmd/influx_inspect',
'influx_tsm' : './cmd/influx_tsm',
}
supported_builds = {
'darwin': [ "amd64" ],
'windows': [ "amd64" ],
'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ]
}
supported_packages = {
"darwin": [ "tar" ],
"linux": [ "deb", "rpm", "tar" ],
"windows": [ "zip" ],
}
################
#### InfluxDB Functions
################
def print_banner():
logging.info("""
___ __ _ ___ ___
|_ _|_ _ / _| |_ ___ _| \\| _ )
| || ' \\| _| | || \\ \\ / |) | _ \\
|___|_||_|_| |_|\\_,_/_\\_\\___/|___/
Build Script
""")
def create_package_fs(build_root):
"""Create a filesystem structure to mimic the package filesystem.
"""
logging.debug("Creating package filesystem at location: {}".format(build_root))
# Using [1:] for the path names due to them being absolute
# (will overwrite previous paths, per 'os.path.join' documentation)
dirs = [ INSTALL_ROOT_DIR[1:],
LOG_DIR[1:],
DATA_DIR[1:],
SCRIPT_DIR[1:],
CONFIG_DIR[1:],
LOGROTATE_DIR[1:],
MAN_DIR[1:] ]
for d in dirs:
os.makedirs(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0o755)
def package_scripts(build_root, config_only=False, windows=False):
"""Copy the necessary scripts and configuration files to the package
filesystem.
"""
if config_only:
logging.debug("Copying configuration to build directory.")
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf"))
os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644)
else:
logging.debug("Copying scripts and sample configuration to build directory.")
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"))
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644)
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"))
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644)
def package_man_files(build_root):
"""Copy and gzip man pages to the package filesystem."""
logging.debug("Installing man pages.")
run("make -C man/ clean install DESTDIR={}/usr".format(build_root))
for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])):
for f in files:
run("gzip -9n {}".format(os.path.join(path, f)))
def run_generate():
"""Run 'go generate' to rebuild any static assets.
"""
logging.info("Running 'go generate'...")
if not check_path_for("statik"):
run("go install github.com/rakyll/statik")
orig_path = None
if os.path.join(os.environ.get("GOPATH"), "bin") not in os.environ["PATH"].split(os.pathsep):
orig_path = os.environ["PATH"].split(os.pathsep)
os.environ["PATH"] = os.environ["PATH"].split(os.pathsep).append(os.path.join(os.environ.get("GOPATH"), "bin"))
run("rm -f ./services/admin/statik/statik.go")
run("go generate ./services/admin")
if orig_path is not None:
os.environ["PATH"] = orig_path
return True
def go_get(branch, update=False, no_uncommitted=False):
"""Retrieve build dependencies or restore pinned dependencies.
"""
if local_changes() and no_uncommitted:
logging.error("There are uncommitted changes in the current directory.")
return False
if not check_path_for("gdm"):
logging.info("Downloading `gdm`...")
get_command = "go get github.com/sparrc/gdm"
run(get_command)
logging.info("Retrieving dependencies with `gdm`...")
sys.stdout.flush()
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH")))
return True
def run_tests(race, parallel, timeout, no_vet):
"""Run the Go test suite on binary output.
"""
logging.info("Starting tests...")
if race:
logging.info("Race is enabled.")
if parallel is not None:
logging.info("Using parallel: {}".format(parallel))
if timeout is not None:
logging.info("Using timeout: {}".format(timeout))
out = run("go fmt ./...")
if len(out) > 0:
logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
logging.error("{}".format(out))
return False
if not no_vet:
logging.info("Running 'go vet'...")
out = run(go_vet_command)
if len(out) > 0:
logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.")
logging.error("{}".format(out))
return False
else:
logging.info("Skipping 'go vet' call...")
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
logging.info("Running tests...")
output = run(test_command)
logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore')))
return True
################
#### All InfluxDB-specific content above this line
################
def run(command, allow_failure=False, shell=False):
"""Run shell command (convenience wrapper around subprocess).
"""
out = None
logging.debug("{}".format(command))
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
out = out.decode('utf-8').strip()
# logging.debug("Command output: {}".format(out))
except subprocess.CalledProcessError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e.output))
return None
else:
logging.error("Command '{}' failed with error: {}".format(command, e.output))
sys.exit(1)
except OSError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e))
return out
else:
logging.error("Command '{}' failed with error: {}".format(command, e))
sys.exit(1)
else:
return out
def create_temp_dir(prefix = None):
""" Create temporary directory with optional prefix.
"""
if prefix is None:
return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
else:
return tempfile.mkdtemp(prefix=prefix)
def increment_minor_version(version):
"""Return the version with the minor version incremented and patch
version set to zero.
"""
ver_list = version.split('.')
if len(ver_list) != 3:
logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
return version
ver_list[1] = str(int(ver_list[1]) + 1)
ver_list[2] = str(0)
inc_version = '.'.join(ver_list)
logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
return inc_version
def get_current_version_tag():
"""Retrieve the raw git version tag.
"""
version = run("git describe --always --tags --abbrev=0")
return version
def get_current_version():
"""Parse version information from git tag output.
"""
version_tag = get_current_version_tag()
# Remove leading 'v'
if version_tag[0] == 'v':
version_tag = version_tag[1:]
# Replace any '-'/'_' with '~'
if '-' in version_tag:
version_tag = version_tag.replace("-","~")
if '_' in version_tag:
version_tag = version_tag.replace("_","~")
return version_tag
def get_current_commit(short=False):
"""Retrieve the current git commit.
"""
command = None
if short:
command = "git log --pretty=format:'%h' -n 1"
else:
command = "git rev-parse HEAD"
out = run(command)
return out.strip('\'\n\r ')
def get_current_branch():
"""Retrieve the current git branch.
"""
command = "git rev-parse --abbrev-ref HEAD"
out = run(command)
return out.strip()
def local_changes():
"""Return True if there are local un-committed changes.
"""
output = run("git diff-files --ignore-submodules --").strip()
if len(output) > 0:
return True
return False
def get_system_arch():
"""Retrieve current system architecture.
"""
arch = os.uname()[4]
if arch == "x86_64":
arch = "amd64"
elif arch == "386":
arch = "i386"
elif 'arm' in arch:
# Prevent uname from reporting full ARM arch (eg 'armv7l')
arch = "arm"
return arch
def get_system_platform():
"""Retrieve current system platform.
"""
if sys.platform.startswith("linux"):
return "linux"
else:
return sys.platform
def get_go_version():
"""Retrieve version information for Go.
"""
out = run("go version")
    matches = re.search(r'go version go(\S+)', out)
if matches is not None:
return matches.groups()[0].strip()
return None
def check_path_for(b):
"""Check the the user's path for the provided binary.
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
full_path = os.path.join(path, b)
        if is_exe(full_path):
return full_path
def check_environ(build_dir = None):
"""Check environment for common Go variables.
"""
logging.info("Checking environment...")
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
cwd = os.getcwd()
if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
return True
def check_prereqs():
"""Check user path for required dependencies.
"""
logging.info("Checking for dependencies...")
for req in prereqs:
if not check_path_for(req):
logging.error("Could not find dependency: {}".format(req))
return False
return True
def upload_packages(packages, bucket_name=None, overwrite=False):
"""Upload provided package output to AWS S3.
"""
logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
try:
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
logging.getLogger("boto").setLevel(logging.WARNING)
except ImportError:
logging.warn("Cannot upload packages without 'boto' Python library!")
return False
logging.info("Connecting to AWS S3...")
# Up the number of attempts to 10 from default of 1
boto.config.add_section("Boto")
boto.config.set("Boto", "metadata_service_num_attempts", "10")
c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
if bucket_name is None:
bucket_name = DEFAULT_BUCKET
bucket = c.get_bucket(bucket_name.split('/')[0])
for p in packages:
if '/' in bucket_name:
# Allow for nested paths within the bucket name (ex:
# bucket/folder). Assuming forward-slashes as path
# delimiter.
name = os.path.join('/'.join(bucket_name.split('/')[1:]),
os.path.basename(p))
else:
name = os.path.basename(p)
logging.debug("Using key: {}".format(name))
if bucket.get_key(name) is None or overwrite:
logging.info("Uploading file {}".format(name))
k = Key(bucket)
k.key = name
if overwrite:
n = k.set_contents_from_filename(p, replace=True)
else:
n = k.set_contents_from_filename(p, replace=False)
k.make_public()
else:
logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
return True
def go_list(vendor=False, relative=False):
"""
    Return a list of packages.
    If vendor is False, vendor packages are not included.
    If relative is True, the package prefix defined by PACKAGE_URL is stripped.
"""
p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
packages = out.split('\n')
if packages[-1] == '':
packages = packages[:-1]
if not vendor:
non_vendor = []
for p in packages:
if '/vendor/' not in p:
non_vendor.append(p)
packages = non_vendor
if relative:
relative_pkgs = []
for p in packages:
r = p.replace(PACKAGE_URL, '.')
if r != '.':
relative_pkgs.append(r)
packages = relative_pkgs
return packages
def build(version=None,
platform=None,
arch=None,
nightly=False,
race=False,
clean=False,
outdir=".",
tags=[],
static=False):
"""Build each target for the specified architecture and platform.
"""
logging.info("Starting build for {}/{}...".format(platform, arch))
logging.info("Using Go version: {}".format(get_go_version()))
logging.info("Using git branch: {}".format(get_current_branch()))
logging.info("Using git commit: {}".format(get_current_commit()))
if static:
logging.info("Using statically-compiled output.")
if race:
logging.info("Race is enabled.")
if len(tags) > 0:
logging.info("Using build tags: {}".format(','.join(tags)))
logging.info("Sending build output to: {}".format(outdir))
if not os.path.exists(outdir):
os.makedirs(outdir)
elif clean and outdir != '/' and outdir != ".":
logging.info("Cleaning build directory '{}' before building.".format(outdir))
shutil.rmtree(outdir)
os.makedirs(outdir)
logging.info("Using version '{}' for build.".format(version))
for target, path in targets.items():
logging.info("Building target: {}".format(target))
build_command = ""
# Handle static binary output
if static is True or "static_" in arch:
if "static_" in arch:
static = True
arch = arch.replace("static_", "")
build_command += "CGO_ENABLED=0 "
# Handle variations in architecture output
if arch == "i386" or arch == "i686":
arch = "386"
elif "arm" in arch:
arch = "arm"
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
if "arm" in arch:
if arch == "armel":
build_command += "GOARM=5 "
elif arch == "armhf" or arch == "arm":
build_command += "GOARM=6 "
elif arch == "arm64":
# TODO(rossmcdonald) - Verify this is the correct setting for arm64
build_command += "GOARM=7 "
else:
logging.error("Invalid ARM architecture specified: {}".format(arch))
logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
return False
if platform == 'windows':
target = target + '.exe'
build_command += "go build -o {} ".format(os.path.join(outdir, target))
if race:
build_command += "-race "
if len(tags) > 0:
build_command += "-tags {} ".format(','.join(tags))
if "1.4" in get_go_version():
if static:
build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
# Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
if static:
build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
if static:
build_command += "-a -installsuffix cgo "
build_command += path
start_time = datetime.utcnow()
run(build_command, shell=True)
end_time = datetime.utcnow()
logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
return True
def generate_md5_from_file(path):
"""Generate MD5 signature based on the contents of the file at path.
"""
m = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
m.update(chunk)
return m.hexdigest()
def generate_sig_from_file(path):
"""Generate a detached GPG signature from the file at path.
"""
logging.debug("Generating GPG signature for file: {}".format(path))
gpg_path = check_path_for('gpg')
if gpg_path is None:
logging.warn("gpg binary not found on path! Skipping signature creation.")
return False
if os.environ.get("GNUPG_HOME") is not None:
run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path))
else:
run('gpg --armor --detach-sign --yes {}'.format(path))
return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
"""Package the output of the build process.
"""
outfiles = []
tmp_build_dir = create_temp_dir()
logging.debug("Packaging for build output: {}".format(build_output))
logging.info("Using temporary directory: {}".format(tmp_build_dir))
try:
for platform in build_output:
# Create top-level folder displaying which platform (linux, etc)
os.makedirs(os.path.join(tmp_build_dir, platform))
for arch in build_output[platform]:
logging.info("Creating packages for {}/{}".format(platform, arch))
# Create second-level directory displaying the architecture (amd64, etc)
current_location = build_output[platform][arch]
# Create directory tree to mimic file system of package
build_root = os.path.join(tmp_build_dir,
platform,
arch,
'{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
os.makedirs(build_root)
# Copy packaging scripts to build directory
if platform == "windows":
# For windows and static builds, just copy
# binaries to root of package (no other scripts or
# directories)
package_scripts(build_root, config_only=True, windows=True)
elif static or "static_" in arch:
package_scripts(build_root, config_only=True)
else:
create_package_fs(build_root)
package_scripts(build_root)
if platform != "windows":
package_man_files(build_root)
for binary in targets:
# Copy newly-built binaries to packaging directory
if platform == 'windows':
binary = binary + '.exe'
if platform == 'windows' or static or "static_" in arch:
# Where the binary should go in the package filesystem
to = os.path.join(build_root, binary)
# Where the binary currently is located
fr = os.path.join(current_location, binary)
else:
# Where the binary currently is located
fr = os.path.join(current_location, binary)
# Where the binary should go in the package filesystem
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
shutil.copy(fr, to)
for package_type in supported_packages[platform]:
# Package the directory structure for each package type for the platform
logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
name = pkg_name
# Reset version, iteration, and current location on each run
# since they may be modified below.
package_version = version
package_iteration = iteration
if "static_" in arch:
# Remove the "static_" from the displayed arch on the package
package_arch = arch.replace("static_", "")
else:
package_arch = arch
if not release and not nightly:
# For non-release builds, just use the commit hash as the version
package_version = "{}~{}".format(version,
get_current_commit(short=True))
package_iteration = "0"
package_build_root = build_root
current_location = build_output[platform][arch]
if package_type in ['zip', 'tar']:
# For tars and zips, start the packaging one folder above
# the build root (to include the package name)
package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
if nightly:
if static or "static_" in arch:
name = '{}-static-nightly_{}_{}'.format(name,
platform,
package_arch)
else:
name = '{}-nightly_{}_{}'.format(name,
platform,
package_arch)
else:
if static or "static_" in arch:
name = '{}-{}-static_{}_{}'.format(name,
package_version,
platform,
package_arch)
else:
name = '{}-{}_{}_{}'.format(name,
package_version,
platform,
package_arch)
current_location = os.path.join(os.getcwd(), current_location)
if package_type == 'tar':
tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
run(tar_command, shell=True)
run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".tar.gz")
outfiles.append(outfile)
elif package_type == 'zip':
zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
run(zip_command, shell=True)
run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".zip")
outfiles.append(outfile)
elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
logging.info("Skipping package type '{}' for static builds.".format(package_type))
else:
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
fpm_common_args,
name,
package_arch,
package_type,
package_version,
package_iteration,
package_build_root,
current_location)
if package_type == "rpm":
fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT)
out = run(fpm_command, shell=True)
matches = re.search(':path=>"(.*)"', out)
outfile = None
if matches is not None:
outfile = matches.groups()[0]
if outfile is None:
logging.warn("Could not determine output from packaging output!")
else:
if nightly:
# Strip nightly version from package name
new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
os.rename(outfile, new_outfile)
outfile = new_outfile
else:
if package_type == 'rpm':
# rpm's convert any dashes to underscores
package_version = package_version.replace("-", "_")
new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
os.rename(outfile, new_outfile)
outfile = new_outfile
outfiles.append(os.path.join(os.getcwd(), outfile))
logging.debug("Produced package files: {}".format(outfiles))
return outfiles
finally:
# Cleanup
shutil.rmtree(tmp_build_dir)
def main(args):
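    """Run the full build pipeline: validate arguments, check out the
    requested branch/commit, optionally run generate and tests, build each
    platform/arch target, then package, sign, and upload the results.
    """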
global PACKAGE_NAME
if args.release and args.nightly:
logging.error("Cannot be both a nightly and a release.")
return 1
if args.nightly:
args.version = increment_minor_version(args.version)
args.version = "{}~n{}".format(args.version,
datetime.utcnow().strftime("%Y%m%d%H%M"))
args.iteration = 0
# Pre-build checks
check_environ()
if not check_prereqs():
return 1
if args.build_tags is None:
args.build_tags = []
else:
args.build_tags = args.build_tags.split(',')
orig_commit = get_current_commit(short=True)
orig_branch = get_current_branch()
if args.platform not in supported_builds and args.platform != 'all':
logging.error("Invalid build platform: {}".format(target_platform))
return 1
build_output = {}
if args.branch != orig_branch and args.commit != orig_commit:
logging.error("Can only specify one branch or commit to build from.")
return 1
elif args.branch != orig_branch:
logging.info("Moving to git branch: {}".format(args.branch))
run("git checkout {}".format(args.branch))
elif args.commit != orig_commit:
logging.info("Moving to git commit: {}".format(args.commit))
run("git checkout {}".format(args.commit))
if not args.no_get:
if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
return 1
if args.generate:
if not run_generate():
return 1
if args.test:
if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
return 1
platforms = []
single_build = True
if args.platform == 'all':
platforms = supported_builds.keys()
single_build = False
else:
platforms = [args.platform]
for platform in platforms:
build_output.update( { platform : {} } )
archs = []
if args.arch == "all":
single_build = False
archs = supported_builds.get(platform)
else:
archs = [args.arch]
for arch in archs:
od = args.outdir
if not single_build:
od = os.path.join(args.outdir, platform, arch)
if not build(version=args.version,
platform=platform,
arch=arch,
nightly=args.nightly,
race=args.race,
clean=args.clean,
outdir=od,
tags=args.build_tags,
static=args.static):
return 1
build_output.get(platform).update( { arch : od } )
# Build packages
if args.package:
if not check_path_for("fpm"):
logging.error("FPM ruby gem required for packaging. Stopping.")
return 1
packages = package(build_output,
args.name,
args.version,
nightly=args.nightly,
iteration=args.iteration,
static=args.static,
release=args.release)
if args.sign:
logging.debug("Generating GPG signatures for packages: {}".format(packages))
sigs = [] # retain signatures so they can be uploaded with packages
for p in packages:
if generate_sig_from_file(p):
sigs.append(p + '.asc')
else:
logging.error("Creation of signature for package [{}] failed!".format(p))
return 1
packages += sigs
if args.upload:
logging.debug("Files staged for upload: {}".format(packages))
if args.nightly:
args.upload_overwrite = True
if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
return 1
logging.info("Packages created:")
for p in packages:
logging.info("{} (MD5={})".format(p.split('/')[-1:][0],
generate_md5_from_file(p)))
if orig_branch != get_current_branch():
logging.info("Moving back to original git branch: {}".format(orig_branch))
run("git checkout {}".format(orig_branch))
return 0
if __name__ == '__main__':
LOG_LEVEL = logging.INFO
if '--debug' in sys.argv[1:]:
LOG_LEVEL = logging.DEBUG
log_format = '[%(levelname)s] %(funcName)s: %(message)s'
logging.basicConfig(level=LOG_LEVEL,
format=log_format)
parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
parser.add_argument('--verbose','-v','--debug',
action='store_true',
help='Use debug output')
parser.add_argument('--outdir', '-o',
metavar='<output directory>',
default='./build/',
type=os.path.abspath,
help='Output directory')
parser.add_argument('--name', '-n',
metavar='<name>',
default=PACKAGE_NAME,
type=str,
help='Name to use for package name (when package is specified)')
parser.add_argument('--arch',
metavar='<amd64|i386|armhf|arm64|armel|all>',
type=str,
default=get_system_arch(),
help='Target architecture for build output')
parser.add_argument('--platform',
metavar='<linux|darwin|windows|all>',
type=str,
default=get_system_platform(),
help='Target platform for build output')
parser.add_argument('--branch',
metavar='<branch>',
type=str,
default=get_current_branch(),
help='Build from a specific branch')
parser.add_argument('--commit',
metavar='<commit>',
type=str,
default=get_current_commit(short=True),
help='Build from a specific commit')
parser.add_argument('--version',
metavar='<version>',
type=str,
default=get_current_version(),
help='Version information to apply to build output (ex: 0.12.0)')
parser.add_argument('--iteration',
metavar='<package iteration>',
type=str,
default="1",
help='Package iteration to apply to build output (defaults to 1)')
parser.add_argument('--stats',
action='store_true',
help='Emit build metrics (requires InfluxDB Python client)')
parser.add_argument('--stats-server',
metavar='<hostname:port>',
type=str,
help='Send build stats to InfluxDB using provided hostname and port')
parser.add_argument('--stats-db',
metavar='<database name>',
type=str,
help='Send build stats to InfluxDB using provided database name')
parser.add_argument('--nightly',
action='store_true',
                        help='Mark build output as nightly build (will increment the minor version)')
parser.add_argument('--update',
action='store_true',
help='Update build dependencies prior to building')
parser.add_argument('--package',
action='store_true',
help='Package binary output')
parser.add_argument('--release',
action='store_true',
help='Mark build output as release')
parser.add_argument('--clean',
action='store_true',
help='Clean output directory before building')
parser.add_argument('--no-get',
action='store_true',
help='Do not retrieve pinned dependencies when building')
parser.add_argument('--no-uncommitted',
action='store_true',
help='Fail if uncommitted changes exist in the working directory')
parser.add_argument('--upload',
action='store_true',
help='Upload output packages to AWS S3')
parser.add_argument('--upload-overwrite','-w',
action='store_true',
                        help='Overwrite existing packages in AWS S3 when uploading')
parser.add_argument('--bucket',
metavar='<S3 bucket name>',
type=str,
default=DEFAULT_BUCKET,
help='Destination bucket for uploads')
parser.add_argument('--generate',
action='store_true',
help='Run "go generate" before building')
parser.add_argument('--build-tags',
metavar='<tags>',
help='Optional build tags to use for compilation')
parser.add_argument('--static',
action='store_true',
help='Create statically-compiled binary output')
parser.add_argument('--sign',
action='store_true',
help='Create GPG detached signatures for packages (when package is specified)')
parser.add_argument('--test',
action='store_true',
help='Run tests (does not produce build output)')
parser.add_argument('--no-vet',
action='store_true',
help='Do not run "go vet" when running tests')
parser.add_argument('--race',
action='store_true',
help='Enable race flag for build output')
parser.add_argument('--parallel',
metavar='<num threads>',
type=int,
help='Number of tests to run simultaneously')
parser.add_argument('--timeout',
metavar='<timeout>',
type=str,
help='Timeout for tests before failing')
args = parser.parse_args()
print_banner()
sys.exit(main(args))
|
apache-2.0
| -5,557,625,705,091,301,000 | 4,181,479,626,969,858,600 | 40.657868 | 135 | 0.519362 | false |
prymatex/SublimeCodeIntel
|
libs/codeintel2/perlcile.py
|
7
|
8773
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# Contributors:
# Eric Promislow ([email protected])
"""
perlcile - a Code Intelligence Language Engine for the Perl language
Module Usage:
from perlcile import scan_purelang
content = open("foo.pl", "r").read()
scan_purelang(content, "foo.pl")
Command-line Usage:
perlcile.py [<options>...] [<Perl file>]
Options:
-h, --help dump this help and exit
-V, --version dump this script's version and exit
-v, --verbose verbose output, use twice for more verbose output
-f, --filename <path> specify the filename of the file content
passed in on stdin, this is used for the "path"
attribute of the emitted <file> tag.
--md5=<string> md5 hash for the input
--mtime=<secs> modification time for output info, in #secs since
1/1/70.
-L, --language <name>
the language of the file being scanned
-c, --clock print timing info for scans (CIX is not printed)
One or more Perl files can be specified as arguments or content can be
passed in on stdin. A directory can also be specified, in which case
all .pl and .pm files in that directory are scanned.
This is a Language Engine for the Code Intelligence (codeintel) system.
Code Intelligence XML format. See:
http://specs.activestate.com/Komodo_3.0/func/code_intelligence.html
http://specs.tl.activestate.com/kd/kd-0100.html
The command-line interface will return non-zero iff the scan failed.
"""
import os
import os.path
import sys
import getopt
from hashlib import md5
import re
import logging
import glob
import time
import stat
from ciElementTree import Element, SubElement, tostring
from SilverCity import ScintillaConstants
from codeintel2 import perl_lexer, perl_parser, util
from codeintel2.tree import pretty_tree_from_tree
from codeintel2.common import CILEError
from codeintel2 import parser_cix
#---- global data
_version_ = (0, 1, 0)
log = logging.getLogger("perlcile")
# log.setLevel(logging.DEBUG)
_gClockIt = 0 # if true then we are gathering timing data
_gClock = None # if gathering timing data this is set to time retrieval fn
_gStartTime = None # start time of current file being scanned
gProvideFullDocs = False
#---- internal support
# This code has intimate knowledge of the code objects defined in
# perl_parser.py
def scan_purelang(buf):
content = buf.accessor.text.expandtabs(8)
tokenizer = perl_lexer.PerlLexer(content, gProvideFullDocs)
parser = perl_parser.Parser(tokenizer, provide_full_docs=gProvideFullDocs)
parser.moduleName = buf.path
parse_tree = parser.parse()
tree = parser.produce_CIX()
return tree
def scan_multilang(tokens, module_elem):
"""Build the Perl module CIX element tree.
"tokens" is a generator of UDL tokens for this UDL-based
multi-lang document.
"module_elem" is the <module> element of a CIX element tree on
which the Perl module should be built.
    Returns the list of CSL tokens in the token stream and a boolean
    indicating whether any Perl code was found.
"""
tokenizer = perl_lexer.PerlMultiLangLexer(tokens)
# "PerlHTML" is about all we need for whichever Perl-based
# template language is being used. This could just as easily be a
# boolean that indicates whether we're processing a pure language
# or a multi-lang one.
parser = perl_parser.Parser(
tokenizer, lang="PerlHTML", provide_full_docs=gProvideFullDocs)
parser.moduleName = "" # Unknown
parser.parse()
parse_tree = parser.produce_CIX_NoHeader(module_elem)
csl_tokens = tokenizer.get_csl_tokens()
return csl_tokens, tokenizer.has_perl_code()
#---- mainline
def main(argv):
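    """Command-line entry point: parse options, collect the Perl files to
    process (or read content from stdin), and scan each one.
    """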
logging.basicConfig()
# Parse options.
try:
opts, args = getopt.getopt(argv[1:], "Vvhf:cL:",
["version", "verbose", "help", "filename=", "md5=", "mtime=",
"clock", "language="])
except getopt.GetoptError as ex:
log.error(str(ex))
log.error("Try `perlcile --help'.")
return 1
numVerboses = 0
stdinFilename = None
md5sum = None
mtime = None
lang = "Perl"
global _gClockIt
for opt, optarg in opts:
if opt in ("-h", "--help"):
sys.stdout.write(__doc__)
return
elif opt in ("-V", "--version"):
ver = '.'.join([str(part) for part in _version_])
print("perlcile %s" % ver)
return
elif opt in ("-v", "--verbose"):
numVerboses += 1
if numVerboses == 1:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.DEBUG)
elif opt in ("-f", "--filename"):
stdinFilename = optarg
elif opt in ("-L", "--language"):
lang = optarg
elif opt in ("--md5",):
md5sum = optarg
elif opt in ("--mtime",):
mtime = optarg
elif opt in ("-c", "--clock"):
_gClockIt = 1
global _gClock
if sys.platform.startswith("win"):
_gClock = time.clock
else:
_gClock = time.time
if len(args) == 0:
contentOnStdin = 1
filenames = [stdinFilename or "<stdin>"]
else:
contentOnStdin = 0
paths = []
for arg in args:
paths += glob.glob(arg)
filenames = []
for path in paths:
if os.path.isfile(path):
filenames.append(path)
elif os.path.isdir(path):
perlfiles = [os.path.join(path, n) for n in os.listdir(path)
if os.path.splitext(n)[1] in (".pl", ".pm")]
perlfiles = [f for f in perlfiles if os.path.isfile(f)]
filenames += perlfiles
    try:
for filename in filenames:
if contentOnStdin:
log.debug("reading content from stdin")
content = sys.stdin.read()
log.debug("finished reading content from stdin")
if mtime is None:
mtime = int(time.time())
else:
if mtime is None:
mtime = int(os.stat(filename)[stat.ST_MTIME])
content = open(filename, 'r').read()
if _gClockIt:
sys.stdout.write("scanning '%s'..." % filename)
global _gStartTime
_gStartTime = _gClock()
data = scan(
content, filename, md5sum=md5sum, mtime=mtime, lang=lang)
if _gClockIt:
sys.stdout.write(" %.3fs\n" % (_gClock()-_gStartTime))
elif data:
sys.stdout.write(data)
except KeyboardInterrupt:
log.debug("user abort")
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
gpl-2.0
| 4,706,880,071,459,455,000 | 8,485,691,505,226,321,000 | 34.518219 | 96 | 0.612447 | false |
postlund/home-assistant
|
script/scaffold/templates/config_flow_oauth2/integration/__init__.py
|
9
|
2558
|
"""The NEW_NAME integration."""
import asyncio
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.helpers import (
aiohttp_client,
config_entry_oauth2_flow,
config_validation as cv,
)
from . import api, config_flow
from .const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
# TODO List the platforms that you want to support.
# For your initial PR, limit it to 1 platform.
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the NEW_NAME component."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
config_flow.OAuth2FlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
),
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up NEW_NAME from a config entry."""
implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
# If using a requests-based API lib
hass.data[DOMAIN][entry.entry_id] = api.ConfigEntryAuth(hass, entry, session)
# If using an aiohttp-based API lib
hass.data[DOMAIN][entry.entry_id] = api.AsyncConfigEntryAuth(
aiohttp_client.async_get_clientsession(hass), session
)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
apache-2.0
| -5,834,591,571,294,845,000 | -4,069,344,326,960,395,000 | 26.212766 | 90 | 0.645426 | false |
vaygr/ansible
|
contrib/inventory/stacki.py
|
39
|
6286
|
#!/usr/bin/env python
# Copyright (c) 2016, Hugh Ma <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Stacki inventory script
# Configure stacki.yml with proper auth information and place in the following:
# - ../inventory/stacki.yml
# - /etc/stacki/stacki.yml
# - /etc/ansible/stacki.yml
# The stacki.yml file can contain entries for authentication information
# regarding the Stacki front-end node.
#
# use_hostnames uses hostname rather than interface ip as connection
#
#
"""
Example Usage:
List Stacki Nodes
$ ./stack.py --list
Example Configuration:
---
stacki:
auth:
stacki_user: admin
stacki_password: abc12345678910
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false
"""
import argparse
import os
import sys
import yaml
from distutils.version import StrictVersion
try:
import json
except ImportError:
import simplejson as json
try:
import requests
except ImportError:
sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
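    """Authenticate against the Stacki frontend and return the HTTP session
    along with the CSRF token and session id needed for later API calls.
    """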
endpoint = params['stacki_endpoint']
auth_creds = {'USERNAME': params['stacki_user'],
'PASSWORD': params['stacki_password']}
client = requests.session()
client.get(endpoint)
init_csrf = client.cookies['csrftoken']
header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
'Content-type': 'application/x-www-form-urlencoded'}
login_endpoint = endpoint + "/login"
login_req = client.post(login_endpoint, data=auth_creds, headers=header)
csrftoken = login_req.cookies['csrftoken']
sessionid = login_req.cookies['sessionid']
auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
return client, auth_creds
def stack_build_header(auth_creds):
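    """Build the HTTP headers required for authenticated Stacki API calls."""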
header = {'csrftoken': auth_creds['CSRFTOKEN'],
'X-CSRFToken': auth_creds['CSRFTOKEN'],
'sessionid': auth_creds['SESSIONID'],
'Content-type': 'application/json'}
return header
def stack_host_list(endpoint, header, client):
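    """Return the list of hosts known to the Stacki frontend."""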
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
headers=header)
return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client):
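    """Return the list of host interfaces known to the Stacki frontend."""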
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
headers=header)
return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config):
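    """Convert raw host and interface data into an Ansible dynamic inventory
    structure with 'all', 'frontends', and 'backends' groups and per-host
    variables under '_meta'.
    """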
use_hostnames = config['use_hostnames']
meta = dict(all=dict(hosts=list()),
frontends=dict(hosts=list()),
backends=dict(hosts=list()),
_meta=dict(hostvars=dict()))
# Iterate through list of dicts of hosts and remove
# environment key as it causes conflicts
for host in hostdata:
del host['environment']
meta['_meta']['hostvars'][host['host']] = host
meta['_meta']['hostvars'][host['host']]['interfaces'] = list()
# @bbyhuy to improve readability in next iteration
for intf in intfdata:
if intf['host'] in meta['_meta']['hostvars']:
meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf)
if intf['default'] is True:
meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip']
if not use_hostnames:
meta['all']['hosts'].append(intf['ip'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['ip'])
else:
meta['frontends']['hosts'].append(intf['ip'])
else:
meta['all']['hosts'].append(intf['host'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['host'])
else:
meta['frontends']['hosts'].append(intf['host'])
return meta
def parse_args():
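    """Parse the standard dynamic inventory arguments (--list / --host)."""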
parser = argparse.ArgumentParser(description='Stacki Inventory Module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active hosts')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
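    """Load configuration, authenticate to the Stacki frontend, and print the
    generated inventory as JSON.
    """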
args = parse_args()
if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
sys.exit('requests>=2.4.3 is required for this inventory script')
try:
config_files = CONFIG_FILES
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
config = None
for cfg_file in config_files:
if os.path.isfile(cfg_file):
stream = open(cfg_file, 'r')
config = yaml.safe_load(stream)
break
if not config:
sys.stderr.write("No config file found at {0}\n".format(config_files))
sys.exit(1)
client, auth_creds = stack_auth(config['stacki']['auth'])
header = stack_build_header(auth_creds)
host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
final_meta = format_meta(host_list, intf_list, config)
print(json.dumps(final_meta, indent=4))
except Exception as e:
        sys.stderr.write('%s\n' % e)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
|
gpl-3.0
| 3,004,618,745,898,375,700 | -558,154,601,283,990,300 | 31.910995 | 96 | 0.619631 | false |
melund/wcwidth
|
wcwidth/table_zero.py
|
7
|
20001
|
"""Zero_Width table. Created by setup.py."""
# Generated: 2015-09-14T01:48:19.532217
# Source: DerivedGeneralCategory-8.0.0.txt
# Date: 2015-02-13, 13:47:11 GMT [MD]
ZERO_WIDTH = (
(0x0300, 0x036f,), # Combining Grave Accent ..Combining Latin Small Le
(0x0483, 0x0489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
(0x0591, 0x05bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
(0x05bf, 0x05bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
(0x05c1, 0x05c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
(0x05c4, 0x05c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
(0x05c7, 0x05c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
(0x0610, 0x061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
(0x064b, 0x065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
(0x0670, 0x0670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
(0x06d6, 0x06dc,), # Arabic Small High Ligatu..Arabic Small High Seen
(0x06df, 0x06e4,), # Arabic Small High Rounde..Arabic Small High Madda
(0x06e7, 0x06e8,), # Arabic Small High Yeh ..Arabic Small High Noon
(0x06ea, 0x06ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
(0x0711, 0x0711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
(0x0730, 0x074a,), # Syriac Pthaha Above ..Syriac Barrekh
(0x07a6, 0x07b0,), # Thaana Abafili ..Thaana Sukun
(0x07eb, 0x07f3,), # Nko Combining Short High..Nko Combining Double Dot
(0x0816, 0x0819,), # Samaritan Mark In ..Samaritan Mark Dagesh
(0x081b, 0x0823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
(0x0825, 0x0827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
(0x0829, 0x082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
(0x0859, 0x085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
(0x08e3, 0x0902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
(0x093a, 0x093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
(0x093c, 0x093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
(0x0941, 0x0948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
(0x094d, 0x094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
(0x0951, 0x0957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
(0x0962, 0x0963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
(0x0981, 0x0981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
(0x09bc, 0x09bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
(0x09c1, 0x09c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
(0x09cd, 0x09cd,), # Bengali Sign Virama ..Bengali Sign Virama
(0x09e2, 0x09e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
(0x0a01, 0x0a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
(0x0a3c, 0x0a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
(0x0a41, 0x0a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
(0x0a47, 0x0a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
(0x0a4b, 0x0a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
(0x0a51, 0x0a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
(0x0a70, 0x0a71,), # Gurmukhi Tippi ..Gurmukhi Addak
(0x0a75, 0x0a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
(0x0a81, 0x0a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
(0x0abc, 0x0abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
(0x0ac1, 0x0ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
(0x0ac7, 0x0ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
(0x0acd, 0x0acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
(0x0ae2, 0x0ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
(0x0b01, 0x0b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
(0x0b3c, 0x0b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
(0x0b3f, 0x0b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
(0x0b41, 0x0b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
(0x0b4d, 0x0b4d,), # Oriya Sign Virama ..Oriya Sign Virama
(0x0b56, 0x0b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
(0x0b62, 0x0b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
(0x0b82, 0x0b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
(0x0bc0, 0x0bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
(0x0bcd, 0x0bcd,), # Tamil Sign Virama ..Tamil Sign Virama
(0x0c00, 0x0c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
(0x0c3e, 0x0c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
(0x0c46, 0x0c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
(0x0c4a, 0x0c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
(0x0c55, 0x0c56,), # Telugu Length Mark ..Telugu Ai Length Mark
(0x0c62, 0x0c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
(0x0c81, 0x0c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
(0x0cbc, 0x0cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
(0x0cbf, 0x0cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
(0x0cc6, 0x0cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
(0x0ccc, 0x0ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
(0x0ce2, 0x0ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
(0x0d01, 0x0d01,), # Malayalam Sign Candrabin..Malayalam Sign Candrabin
(0x0d41, 0x0d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
(0x0d4d, 0x0d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
(0x0d62, 0x0d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
(0x0dca, 0x0dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
(0x0dd2, 0x0dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
(0x0dd6, 0x0dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
(0x0e31, 0x0e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
(0x0e34, 0x0e3a,), # Thai Character Sara I ..Thai Character Phinthu
(0x0e47, 0x0e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
(0x0eb1, 0x0eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
(0x0eb4, 0x0eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
(0x0ebb, 0x0ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
(0x0ec8, 0x0ecd,), # Lao Tone Mai Ek ..Lao Niggahita
(0x0f18, 0x0f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
(0x0f35, 0x0f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
(0x0f37, 0x0f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
(0x0f39, 0x0f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
(0x0f71, 0x0f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
(0x0f80, 0x0f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
(0x0f86, 0x0f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
(0x0f8d, 0x0f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
(0x0f99, 0x0fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
(0x0fc6, 0x0fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
(0x102d, 0x1030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
(0x1032, 0x1037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
(0x1039, 0x103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
(0x103d, 0x103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
(0x1058, 0x1059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
(0x105e, 0x1060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
(0x1071, 0x1074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
(0x1082, 0x1082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
(0x1085, 0x1086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
(0x108d, 0x108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
(0x109d, 0x109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
(0x135d, 0x135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
(0x1712, 0x1714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
(0x1732, 0x1734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
(0x1752, 0x1753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
(0x1772, 0x1773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
(0x17b4, 0x17b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
(0x17b7, 0x17bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
(0x17c6, 0x17c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
(0x17c9, 0x17d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
(0x17dd, 0x17dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
(0x180b, 0x180d,), # Mongolian Free Variation..Mongolian Free Variation
(0x18a9, 0x18a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
(0x1920, 0x1922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
(0x1927, 0x1928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
(0x1932, 0x1932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
(0x1939, 0x193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
(0x1a17, 0x1a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
(0x1a1b, 0x1a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
(0x1a56, 0x1a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
(0x1a58, 0x1a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
(0x1a60, 0x1a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
(0x1a62, 0x1a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
(0x1a65, 0x1a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
(0x1a73, 0x1a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
(0x1a7f, 0x1a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
(0x1ab0, 0x1abe,), # Combining Doubled Circum..Combining Parentheses Ov
(0x1b00, 0x1b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
(0x1b34, 0x1b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
(0x1b36, 0x1b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
(0x1b3c, 0x1b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
(0x1b42, 0x1b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
(0x1b6b, 0x1b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
(0x1b80, 0x1b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
(0x1ba2, 0x1ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
(0x1ba8, 0x1ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
(0x1bab, 0x1bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
(0x1be6, 0x1be6,), # Batak Sign Tompi ..Batak Sign Tompi
(0x1be8, 0x1be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
(0x1bed, 0x1bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
(0x1bef, 0x1bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
(0x1c2c, 0x1c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
(0x1c36, 0x1c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
(0x1cd0, 0x1cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
(0x1cd4, 0x1ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
(0x1ce2, 0x1ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
(0x1ced, 0x1ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
(0x1cf4, 0x1cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
(0x1cf8, 0x1cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
(0x1dc0, 0x1df5,), # Combining Dotted Grave A..Combining Up Tack Above
(0x1dfc, 0x1dff,), # Combining Double Inverte..Combining Right Arrowhea
(0x20d0, 0x20f0,), # Combining Left Harpoon A..Combining Asterisk Above
(0x2cef, 0x2cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
(0x2d7f, 0x2d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
(0x2de0, 0x2dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
(0x302a, 0x302d,), # Ideographic Level Tone M..Ideographic Entering Ton
(0x3099, 0x309a,), # Combining Katakana-hirag..Combining Katakana-hirag
(0xa66f, 0xa672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
(0xa674, 0xa67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
(0xa69e, 0xa69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
(0xa6f0, 0xa6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
(0xa802, 0xa802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
(0xa806, 0xa806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
(0xa80b, 0xa80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
(0xa825, 0xa826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
(0xa8c4, 0xa8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
(0xa8e0, 0xa8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
(0xa926, 0xa92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
(0xa947, 0xa951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
(0xa980, 0xa982,), # Javanese Sign Panyangga ..Javanese Sign Layar
(0xa9b3, 0xa9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
(0xa9b6, 0xa9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
(0xa9bc, 0xa9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
(0xa9e5, 0xa9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
(0xaa29, 0xaa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
(0xaa31, 0xaa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
(0xaa35, 0xaa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
(0xaa43, 0xaa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
(0xaa4c, 0xaa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
(0xaa7c, 0xaa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
(0xaab0, 0xaab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
(0xaab2, 0xaab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
(0xaab7, 0xaab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
(0xaabe, 0xaabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
(0xaac1, 0xaac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
(0xaaec, 0xaaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
(0xaaf6, 0xaaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
(0xabe5, 0xabe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
(0xabe8, 0xabe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
(0xabed, 0xabed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
(0xfb1e, 0xfb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
(0xfe00, 0xfe0f,), # Variation Selector-1 ..Variation Selector-16
(0xfe20, 0xfe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
(0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
(0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
(0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
(0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
(0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
(0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
(0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
(0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
(0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
(0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
(0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
(0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
(0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
(0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
(0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
(0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
(0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
(0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
(0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
(0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
(0x111ca, 0x111cc,), # Sharada Sign Nukta ..Sharada Extra Short Vowe
(0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
(0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
(0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
(0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
(0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
(0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
(0x1133c, 0x1133c,), # Grantha Sign Nukta ..Grantha Sign Nukta
(0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
(0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
(0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
(0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
(0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
(0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
(0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
(0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
(0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
(0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
(0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
(0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
(0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
(0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
(0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
(0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
(0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
(0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
(0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
(0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
(0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
(0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
(0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
(0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
(0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
(0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
(0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
(0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
(0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
(0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
(0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
(0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
(0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
(0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
)
|
mit
| 4,727,986,660,246,039,000 | 4,043,802,047,920,106,500 | 73.077778 | 78 | 0.671616 | false |
jkunimune15/Map-Projections
|
src/zupplemental/compose_maps.py
|
1
|
5115
|
#compose_maps.py
#make ALL the maps
import math
from generate_borders import generate_borders
from generate_graticule import generate_graticule, generate_backdrop
from generate_indicatrices import generate_indicatrices
from generate_orthodromes import generate_orthodromes
from generate_shape import plot_shapes
from generate_labels import generate_topographical_labels, label_shapes, label_points
def compose_landmasses():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="water">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
def compose_graticule():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_graticule2():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(15, .25, include_tropics=True, adjust_poles=True, double_dateline=True)
print('\t\t</g>')
print('\t</g>')
def compose_compound():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_50m_rivers_lake_centerlines', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(15, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(15, math.radians(3.75), resolution=180, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices2(ctr_meridian):
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="water">')
generate_backdrop(.5, ctr_meridian=ctr_meridian)
print('\t\t</g>')
print('\t\t<g class="land">')
plot_shapes('ne_110m_land', flesh_out_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_110m_lakes')
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(10, .5, double_dateline=(ctr_meridian==0))
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(30, 500/6371, ctr_meridian=ctr_meridian, adjust_poles=True, resolution=120, side_res=5, pole_res=120)
print('\t\t</g>')
print('\t</g>')
def compose_political():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_50m', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
label_shapes('ne_50m_admin_0_countries', "pol")
def compose_orthodromes():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="lines">')
generate_orthodromes()
print('\t\t</g>')
print('\t</g>')
def compose_everything():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=False)
print('\t\t<g class="border">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=True)
print('\t\t</g>')
print('\t\t</g>')
print('\t\t<g class="sovereign">')
plot_shapes('ne_10m_admin_0_map_units')
print('\t\t</g>')
print('\t\t<g class="admin">')
plot_shapes('ne_10m_admin_1_states_provinces_lines', filter_field='adm0_a3',
filter_vals=['RUS','CAN','CHN','USA','BRA','AUS','IND','ARG','KAZ'])
print('\t\t</g>')
print('\t\t<g class="dispute">')
plot_shapes('ne_10m_admin_0_boundary_lines_disputed_areas')
print('\t\t</g>')
print('\t\t<g class="coastline">')
plot_shapes('ne_10m_coastline', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_10m_rivers_lake_centerlines', max_rank=5)
print('\t\t</g>')
print('\t\t<g class="lake">')
plot_shapes('ne_10m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
plot_shapes('ne_10m_geographic_lines', clazz="dateline", filter_field='name', filter_vals=["International Date Line"])
print('\t\t</g>')
print('\t</g>')
generate_topographical_labels('ne_50m', max_rank=2, text_size=4)
label_shapes('ne_10m_lakes', "sea", max_rank=1, text_size=1)
label_shapes('ne_10m_admin_0_countries', "pol", text_size=4)
label_points('cities_capital', "cap", text_size=1)
label_points('cities_other', "cit", text_size=0)
if __name__ == '__main__':
# compose_landmasses()
# compose_graticule()
# compose_compound()
# compose_indicatrices()
# compose_indicatrices2(-0)
# compose_political()
# compose_orthodromes()
compose_everything()
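# Usage note (added, hedged): every compose_* function above writes SVG
# fragments to standard output, so a map body can be captured by redirecting
# the script's output, e.g.
#
#     python compose_maps.py > everything.svg
#
# (the output file name is only an example; the surrounding SVG header and
# footer are assumed to be supplied elsewhere).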
|
mit
| 2,645,040,251,199,792,600 | 6,335,135,880,211,719,000 | 32.874172 | 124 | 0.657869 | false |
sarvex/tensorflow
|
tensorflow/python/keras/preprocessing/image_dataset.py
|
6
|
11428
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image dataset loading utilities."""
# pylint: disable=g-classes-have-attributes
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.preprocessing import dataset_utils
from tensorflow.python.keras.preprocessing import image as keras_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.util.tf_export import keras_export
ALLOWLIST_FORMATS = ('.bmp', '.gif', '.jpeg', '.jpg', '.png')
@keras_export('keras.utils.image_dataset_from_directory',
'keras.preprocessing.image_dataset_from_directory',
v1=[])
def image_dataset_from_directory(directory,
labels='inferred',
label_mode='int',
class_names=None,
color_mode='rgb',
batch_size=32,
image_size=(256, 256),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation='bilinear',
follow_links=False,
crop_to_aspect_ratio=False,
**kwargs):
"""Generates a `tf.data.Dataset` from image files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
```
Then calling `image_dataset_from_directory(main_directory, labels='inferred')`
will return a `tf.data.Dataset` that yields batches of images from
the subdirectories `class_a` and `class_b`, together with labels
0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
Supported image formats: jpeg, png, bmp, gif.
Animated gifs are truncated to the first frame.
Args:
directory: Directory where the data is located.
If `labels` is "inferred", it should contain
subdirectories, each containing images for a class.
Otherwise, the directory structure is ignored.
labels: Either "inferred"
(labels are generated from the directory structure),
None (no labels),
or a list/tuple of integer labels of the same size as the number of
image files found in the directory. Labels should be sorted according
to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
label_mode:
- 'int': means that the labels are encoded as integers
(e.g. for `sparse_categorical_crossentropy` loss).
- 'categorical' means that the labels are
encoded as a categorical vector
(e.g. for `categorical_crossentropy` loss).
- 'binary' means that the labels (there can be only 2)
are encoded as `float32` scalars with values 0 or 1
(e.g. for `binary_crossentropy`).
- None (no labels).
    class_names: Only valid if "labels" is "inferred". This is the explicit
list of class names (must match names of subdirectories). Used
to control the order of the classes
(otherwise alphanumerical order is used).
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
batch_size: Size of the batches of data. Default: 32.
image_size: Size to resize images to after they are read from disk.
Defaults to `(256, 256)`.
Since the pipeline processes batches of images that must all have
the same size, this must be provided.
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation".
Only used if `validation_split` is set.
interpolation: String, the interpolation method used when resizing images.
Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`,
`area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.
follow_links: Whether to visits subdirectories pointed to by symlinks.
Defaults to False.
crop_to_aspect_ratio: If True, resize the images without aspect
ratio distortion. When the original aspect ratio differs from the target
aspect ratio, the output image will be cropped so as to return the largest
possible window in the image (of size `image_size`) that matches
the target aspect ratio. By default (`crop_to_aspect_ratio=False`),
aspect ratio may not be preserved.
**kwargs: Legacy keyword arguments.
Returns:
A `tf.data.Dataset` object.
- If `label_mode` is None, it yields `float32` tensors of shape
`(batch_size, image_size[0], image_size[1], num_channels)`,
encoding images (see below for rules regarding `num_channels`).
- Otherwise, it yields a tuple `(images, labels)`, where `images`
has shape `(batch_size, image_size[0], image_size[1], num_channels)`,
and `labels` follows the format described below.
Rules regarding labels format:
- if `label_mode` is `int`, the labels are an `int32` tensor of shape
`(batch_size,)`.
- if `label_mode` is `binary`, the labels are a `float32` tensor of
1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
of shape `(batch_size, num_classes)`, representing a one-hot
encoding of the class index.
Rules regarding number of channels in the yielded images:
- if `color_mode` is `grayscale`,
there's 1 channel in the image tensors.
- if `color_mode` is `rgb`,
      there are 3 channels in the image tensors.
- if `color_mode` is `rgba`,
      there are 4 channels in the image tensors.
"""
if 'smart_resize' in kwargs:
crop_to_aspect_ratio = kwargs.pop('smart_resize')
if kwargs:
raise TypeError(f'Unknown keywords argument(s): {tuple(kwargs.keys())}')
if labels not in ('inferred', None):
if not isinstance(labels, (list, tuple)):
raise ValueError(
'`labels` argument should be a list/tuple of integer labels, of '
'the same size as the number of image files in the target '
'directory. If you wish to infer the labels from the subdirectory '
'names in the target directory, pass `labels="inferred"`. '
'If you wish to get a dataset that only contains images '
'(no labels), pass `label_mode=None`.')
if class_names:
raise ValueError('You can only pass `class_names` if the labels are '
'inferred from the subdirectory names in the target '
'directory (`labels="inferred"`).')
if label_mode not in {'int', 'categorical', 'binary', None}:
raise ValueError(
'`label_mode` argument must be one of "int", "categorical", "binary", '
'or None. Received: %s' % (label_mode,))
if labels is None or label_mode is None:
labels = None
label_mode = None
if color_mode == 'rgb':
num_channels = 3
elif color_mode == 'rgba':
num_channels = 4
elif color_mode == 'grayscale':
num_channels = 1
else:
raise ValueError(
        '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
'Received: %s' % (color_mode,))
interpolation = image_preprocessing.get_interpolation(interpolation)
dataset_utils.check_validation_split_arg(
validation_split, subset, shuffle, seed)
if seed is None:
seed = np.random.randint(1e6)
image_paths, labels, class_names = dataset_utils.index_directory(
directory,
labels,
formats=ALLOWLIST_FORMATS,
class_names=class_names,
shuffle=shuffle,
seed=seed,
follow_links=follow_links)
if label_mode == 'binary' and len(class_names) != 2:
raise ValueError(
        'When passing `label_mode="binary"`, there must be exactly 2 classes. '
'Found the following classes: %s' % (class_names,))
image_paths, labels = dataset_utils.get_training_or_validation_split(
image_paths, labels, validation_split, subset)
if not image_paths:
raise ValueError('No images found.')
dataset = paths_and_labels_to_dataset(
image_paths=image_paths,
image_size=image_size,
num_channels=num_channels,
labels=labels,
label_mode=label_mode,
num_classes=len(class_names),
interpolation=interpolation,
crop_to_aspect_ratio=crop_to_aspect_ratio)
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
# Users may need to reference `class_names`.
dataset.class_names = class_names
# Include file paths for images as attribute.
dataset.file_paths = image_paths
return dataset
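# Illustrative sketch (added, not part of the original module): one way the
# function above could be called, assuming a hypothetical 'flower_photos/'
# directory laid out as main_directory/class_name/*.jpg.
def _example_image_dataset_usage():  # pragma: no cover
  train_ds = image_dataset_from_directory(
      'flower_photos/',
      validation_split=0.2,
      subset='training',
      seed=123,
      image_size=(180, 180),
      batch_size=32)
  val_ds = image_dataset_from_directory(
      'flower_photos/',
      validation_split=0.2,
      subset='validation',
      seed=123,
      image_size=(180, 180),
      batch_size=32)
  return train_ds, val_ds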
def paths_and_labels_to_dataset(image_paths,
image_size,
num_channels,
labels,
label_mode,
num_classes,
interpolation,
crop_to_aspect_ratio=False):
"""Constructs a dataset of images and labels."""
# TODO(fchollet): consider making num_parallel_calls settable
path_ds = dataset_ops.Dataset.from_tensor_slices(image_paths)
args = (image_size, num_channels, interpolation, crop_to_aspect_ratio)
img_ds = path_ds.map(
lambda x: load_image(x, *args))
if label_mode:
label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
img_ds = dataset_ops.Dataset.zip((img_ds, label_ds))
return img_ds
def load_image(path, image_size, num_channels, interpolation,
crop_to_aspect_ratio=False):
"""Load an image from a path and resize it."""
img = io_ops.read_file(path)
img = image_ops.decode_image(
img, channels=num_channels, expand_animations=False)
if crop_to_aspect_ratio:
img = keras_image_ops.smart_resize(img, image_size,
interpolation=interpolation)
else:
img = image_ops.resize_images_v2(img, image_size, method=interpolation)
img.set_shape((image_size[0], image_size[1], num_channels))
return img
|
apache-2.0
| 3,221,280,118,123,887,600 | 3,232,350,920,145,217,500 | 42.452471 | 80 | 0.633357 | false |
ricardogsilva/QGIS
|
tests/src/python/test_qgsimagecache.py
|
41
|
5431
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsImageCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '02/10/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
import time
from qgis.PyQt.QtCore import QDir, QCoreApplication, QSize
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsImageCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
time.sleep(1)
return http.server.SimpleHTTPRequestHandler.do_GET(self)
class TestQgsImageCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Bring up a simple HTTP server, for remote SVG tests
os.chdir(unitTestDataPath() + '')
handler = SlowHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
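    # Descriptive note (added): binding the TCPServer to port 0 makes the OS
    # choose a free port; the tests read it back from server_address[1] above
    # and substitute it into the http://localhost:{port}/... URLs below.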
def setUp(self):
self.report = "<h1>Python QgsImageCache Tests</h1>\n"
self.fetched = False
QgsApplication.imageCache().remoteImageFetched.connect(self.imageFetched)
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def imageFetched(self):
self.fetched = True
def waitForFetch(self):
self.fetched = False
while not self.fetched:
QCoreApplication.processEvents()
def testRemoteImage(self):
"""Test fetching remote image."""
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote Image', 'waiting_image', image))
self.assertFalse(QgsApplication.imageCache().originalSize(url).isValid())
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
self.assertEqual(QgsApplication.imageCache().originalSize(url), QSize(511, 800), 1.0)
def testRemoteImageMissing(self):
"""Test fetching remote image with bad url"""
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def testRemoteImageBlocking(self):
"""Test fetching remote image."""
# remote not yet requested so not in cache
url = 'http://localhost:{}/qgis_local_server/logo_2017.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
# first should be correct image
self.assertTrue(self.imageCheck('Remote image sync', 'remote_image_blocking', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'image_' + name + ".png"
output_image = QImage(image.size(), QImage.Format_RGB32)
QgsMultiRenderChecker.drawBackground(output_image)
painter = QPainter(output_image)
painter.drawImage(0, 0, image)
painter.end()
output_image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("image_cache")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print((self.report))
return result
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 4,069,818,811,089,207,300 | 1,710,354,312,100,561,700 | 37.792857 | 113 | 0.676303 | false |
saurabhjn76/sympy
|
examples/advanced/curvilinear_coordinates.py
|
96
|
3691
|
#!/usr/bin/env python
"""
This example shows how to work with coordinate transformations, curvilinear
coordinates and a little bit with differential geometry.
It takes polar, cylindrical, spherical, rotating disk coordinates and others
and calculates all kinds of interesting properties, like Jacobian, metric
tensor, Laplace operator, ...
"""
from sympy import var, sin, cos, pprint, Matrix, eye, trigsimp, Eq, \
Function, simplify, sinh, cosh, expand, symbols
def laplace(f, g_inv, g_det, X):
"""
Calculates Laplace(f), using the inverse metric g_inv, the determinant of
the metric g_det, all in variables X.
"""
r = 0
for i in range(len(X)):
for j in range(len(X)):
r += g_inv[i, j]*f.diff(X[i]).diff(X[j])
for sigma in range(len(X)):
for alpha in range(len(X)):
r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \
f.diff(X[alpha]) / (2*g_det)
return r
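# Math note (added): in index notation the loops above evaluate
#
#     \Delta f = g^{ij} \partial_i \partial_j f
#                + \frac{\partial_\sigma(\det g)}{2 \det g} g^{\sigma\alpha} \partial_\alpha f
#
# Since \partial_\sigma \sqrt{g} / \sqrt{g} = \partial_\sigma(\det g) / (2 \det g),
# this agrees with the Laplace-Beltrami operator
# \Delta f = \frac{1}{\sqrt{g}} \partial_\sigma(\sqrt{g}\, g^{\sigma\alpha} \partial_\alpha f)
# up to a (\partial_\sigma g^{\sigma\alpha}) \partial_\alpha f term, which vanishes
# for metrics such as the polar, cylindrical and spherical ones used below.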
def transform(name, X, Y, g_correct=None, recursive=False):
"""
Transforms from cartesian coordinates X to any curvilinear coordinates Y.
    It prints useful information, like the Jacobian, metric tensor, determinant
    of the metric, and the Laplace operator in the new coordinates, ...
g_correct ... if not None, it will be taken as the metric --- this is
useful if sympy's trigsimp() is not powerful enough to
simplify the metric so that it is usable for later
calculation. Leave it as None, only if the metric that
transform() prints is not simplified, you can help it by
specifying the correct one.
recursive ... apply recursive trigonometric simplification (use only when
needed, as it is an expensive operation)
"""
print("_"*80)
print("Transformation:", name)
for x, y in zip(X, Y):
pprint(Eq(y, x))
J = X.jacobian(Y)
print("Jacobian:")
pprint(J)
g = J.T*eye(J.shape[0])*J
g = g.applyfunc(expand)
print("metric tensor g_{ij}:")
pprint(g)
if g_correct is not None:
g = g_correct
print("metric tensor g_{ij} specified by hand:")
pprint(g)
print("inverse metric tensor g^{ij}:")
g_inv = g.inv(method="ADJ")
g_inv = g_inv.applyfunc(simplify)
pprint(g_inv)
print("det g_{ij}:")
g_det = g.det()
pprint(g_det)
f = Function("f")(*list(Y))
print("Laplace:")
pprint(laplace(f, g_inv, g_det, Y))
def main():
mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w = symbols(
"mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w")
transform("polar", Matrix([rho*cos(phi), rho*sin(phi)]), [rho, phi])
transform("cylindrical", Matrix([rho*cos(phi), rho*sin(phi), z]),
[rho, phi, z])
transform("spherical",
Matrix([rho*sin(theta)*cos(phi), rho*sin(theta)*sin(phi),
rho*cos(theta)]),
[rho, theta, phi],
recursive=True
)
transform("rotating disk",
Matrix([t,
x*cos(w*t) - y*sin(w*t),
x*sin(w*t) + y*cos(w*t),
z]),
[t, x, y, z])
transform("parabolic",
Matrix([sigma*tau, (tau**2 - sigma**2) / 2]),
[sigma, tau])
transform("bipolar",
Matrix([a*sinh(tau)/(cosh(tau)-cos(sigma)),
a*sin(sigma)/(cosh(tau)-cos(sigma))]),
[sigma, tau]
)
transform("elliptic",
Matrix([a*cosh(mu)*cos(nu), a*sinh(mu)*sin(nu)]),
[mu, nu]
)
if __name__ == "__main__":
main()
|
bsd-3-clause
| 7,184,108,011,603,554,000 | 6,077,501,840,746,178,000 | 30.818966 | 77 | 0.551341 | false |
Juniper/contrail-dev-neutron
|
neutron/plugins/embrane/common/constants.py
|
11
|
2821
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc.
from heleosapi import exceptions as h_exc
from neutron.plugins.common import constants
# Router specific constants
UTIF_LIMIT = 7
QUEUE_TIMEOUT = 300
class Status:
# Transient
CREATING = constants.PENDING_CREATE
UPDATING = constants.PENDING_UPDATE
DELETING = constants.PENDING_DELETE
# Final
ACTIVE = constants.ACTIVE
ERROR = constants.ERROR
READY = constants.INACTIVE
DELETED = "DELETED" # not visible
class Events:
CREATE_ROUTER = "create_router"
UPDATE_ROUTER = "update_router"
DELETE_ROUTER = "delete_router"
GROW_ROUTER_IF = "grow_router_if"
SHRINK_ROUTER_IF = "shrink_router_if"
SET_NAT_RULE = "set_nat_rule"
RESET_NAT_RULE = "reset_nat_rule"
_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s")
_DVA_NOT_FOUND_ERROR_MSG = _("Dva can't be found to execute the operation, "
"probably was cancelled through the heleos UI")
_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s")
_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken "
"for reason %s")
_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s")
_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state "
"for reason %s")
_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s")
_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend "
"router for reason %s. Please remove "
"it manually through the heleos UI")
error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG,
             h_exc.DvaNotFound: _DVA_NOT_FOUND_ERROR_MSG,
h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG,
h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG,
h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG,
h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG,
h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG,
h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG}
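# Illustrative note (added, hypothetical usage): a caller catching a heleosapi
# exception could turn it into its operator-facing message roughly like
#
#     msg = error_map[type(exc)] % exc
#
# (the exact formatting argument depends on the exception type).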
|
apache-2.0
| -5,398,273,397,242,657,000 | 5,450,342,108,531,321,000 | 38.180556 | 78 | 0.657214 | false |
michaelmior/lemur
|
docs/conf.py
|
8
|
8204
|
# -*- coding: utf-8 -*-
#
# lemur documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 7 18:43:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.autohttp.flask',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lemur'
copyright = u'2015, Netflix Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lemurdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'lemur.tex', u'Lemur Documentation',
u'Kevin Glisson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Lemur', u'Lemur Documentation',
[u'Kevin Glisson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Lemur', u'Lemur Documentation',
u'Kevin Glisson', 'Lemur', 'SSL Certificate Management',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
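# Usage note (added, hedged): with this conf.py in the docs/ directory, the
# HTML documentation is typically built from that directory with
#
#     sphinx-build -b html . _build/html
#
# (the output path is only an example).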
|
apache-2.0
| 7,492,868,183,639,948,000 | -304,560,684,088,819,460 | 30.312977 | 79 | 0.705753 | false |
stevenbaker/dotfiles
|
.vim/bundle/jedi-vim/jedi/jedi/modules.py
|
2
|
14663
|
"""
Don't confuse these classes with :mod:`parsing_representation` modules; the
modules here can access those representations with ``module.parser.module``.
``Module`` exists mainly for caching purposes.
Basically :mod:`modules` offers the classes:
- ``CachedModule``, a base class for cached modules.
- ``Module``, the class for all normal Python modules (not builtins, they are at
home at :mod:`builtin`).
- ``ModuleWithCursor``, holds the module information for :class:`api.Script`.
Apart from those classes there's a ``sys.path`` fetching function, as well as
`Virtual Env` and `Django` detection.
"""
from __future__ import with_statement
import re
import tokenizer as tokenize
import sys
import os
from ast import literal_eval
from jedi._compatibility import exec_function, unicode
from jedi import cache
from jedi import parsing_representation as pr
from jedi import fast_parser
from jedi import debug
from jedi import common
class CachedModule(object):
"""
The base type for all modules, which is not to be confused with
`parsing_representation.Module`. Caching happens here.
"""
def __init__(self, path=None, name=None):
self.path = path and os.path.abspath(path)
self.name = name
self._parser = None
@property
def parser(self):
""" get the parser lazy """
if self._parser is None:
self._parser = cache.load_module(self.path, self.name) \
or self._load_module()
return self._parser
def _get_source(self):
raise NotImplementedError()
def _load_module(self):
source = self._get_source()
p = self.path or self.name
p = fast_parser.FastParser(source, p)
cache.save_module(self.path, self.name, p)
return p
class Module(CachedModule):
"""
Manages all files, that are parsed and caches them.
:param path: The module path of the file.
:param source: The source code of the file.
"""
def __init__(self, path, source=None):
super(Module, self).__init__(path=path)
if source is None:
with open(path) as f:
source = f.read()
self.source = source_to_unicode(source)
self._line_cache = None
def _get_source(self):
""" Just one time """
s = self.source
del self.source # memory efficiency
return s
class ModuleWithCursor(Module):
"""
Manages all files, that are parsed and caches them.
Important are the params source and path, one of them has to
be there.
:param source: The source code of the file.
:param path: The module path of the file or None.
    :param position: The position the user is currently in. Only important \
for the main file.
"""
def __init__(self, path, source, position):
super(ModuleWithCursor, self).__init__(path, source)
self.position = position
self.source = source
self._path_until_cursor = None
# this two are only used, because there is no nonlocal in Python 2
self._line_temp = None
self._relevant_temp = None
@property
def parser(self):
""" get the parser lazy """
if not self._parser:
with common.ignored(KeyError):
parser = cache.parser_cache[self.path].parser
cache.invalidate_star_import_cache(parser.module)
# Call the parser already here, because it will be used anyways.
# Also, the position is here important (which will not be used by
# default), therefore fill the cache here.
self._parser = fast_parser.FastParser(self.source, self.path,
self.position)
# don't pickle that module, because it's changing fast
cache.save_module(self.path, self.name, self._parser,
pickling=False)
return self._parser
def get_path_until_cursor(self):
""" Get the path under the cursor. """
if self._path_until_cursor is None: # small caching
self._path_until_cursor, self._start_cursor_pos = \
self._get_path_until_cursor(self.position)
return self._path_until_cursor
def _get_path_until_cursor(self, start_pos=None):
def fetch_line():
if self._is_first:
self._is_first = False
self._line_length = self._column_temp
line = self._first_line
else:
line = self.get_line(self._line_temp)
self._line_length = len(line)
line = line + '\n'
# add lines with a backslash at the end
while True:
self._line_temp -= 1
last_line = self.get_line(self._line_temp)
#print self._line_temp, repr(last_line)
if last_line and last_line[-1] == '\\':
line = last_line[:-1] + ' ' + line
self._line_length = len(last_line)
else:
break
return line[::-1]
self._is_first = True
self._line_temp, self._column_temp = start_cursor = start_pos
self._first_line = self.get_line(self._line_temp)[:self._column_temp]
open_brackets = ['(', '[', '{']
close_brackets = [')', ']', '}']
gen = tokenize.generate_tokens(fetch_line)
string = ''
level = 0
force_point = False
last_type = None
try:
for token_type, tok, start, end, line in gen:
# print 'tok', token_type, tok, force_point
if last_type == token_type == tokenize.NAME:
string += ' '
if level > 0:
if tok in close_brackets:
level += 1
if tok in open_brackets:
level -= 1
elif tok == '.':
force_point = False
elif force_point:
# it is reversed, therefore a number is getting recognized
# as a floating point number
if token_type == tokenize.NUMBER and tok[0] == '.':
force_point = False
else:
break
elif tok in close_brackets:
level += 1
elif token_type in [tokenize.NAME, tokenize.STRING]:
force_point = True
elif token_type == tokenize.NUMBER:
pass
else:
self._column_temp = self._line_length - end[1]
break
x = start_pos[0] - end[0] + 1
l = self.get_line(x)
l = self._first_line if x == start_pos[0] else l
start_cursor = x, len(l) - end[1]
self._column_temp = self._line_length - end[1]
string += tok
last_type = token_type
except tokenize.TokenError:
debug.warning("Tokenize couldn't finish", sys.exc_info)
# string can still contain spaces at the end
return string[::-1].strip(), start_cursor
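    # Descriptive note (added): fetch_line() above hands each source line to
    # the tokenizer reversed (line[::-1]); that is why closing brackets
    # increase `level` and why the collected string is flipped back with
    # string[::-1] before being returned.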
def get_path_under_cursor(self):
"""
        Return the path under the cursor. If part of the path continues after
        the cursor, it is appended to the part before the cursor.
"""
return self.get_path_until_cursor() + self.get_path_after_cursor()
def get_path_after_cursor(self):
line = self.get_line(self.position[0])
return re.search("[\w\d]*", line[self.position[1]:]).group(0)
def get_operator_under_cursor(self):
line = self.get_line(self.position[0])
after = re.match("[^\w\s]+", line[self.position[1]:])
before = re.match("[^\w\s]+", line[:self.position[1]][::-1])
return (before.group(0) if before is not None else '') \
+ (after.group(0) if after is not None else '')
def get_context(self, yield_positions=False):
pos = self._start_cursor_pos
while True:
# remove non important white space
line = self.get_line(pos[0])
while True:
if pos[1] == 0:
line = self.get_line(pos[0] - 1)
if line and line[-1] == '\\':
pos = pos[0] - 1, len(line) - 1
continue
else:
break
if line[pos[1] - 1].isspace():
pos = pos[0], pos[1] - 1
else:
break
try:
result, pos = self._get_path_until_cursor(start_pos=pos)
if yield_positions:
yield pos
else:
yield result
except StopIteration:
if yield_positions:
yield None
else:
yield ''
def get_line(self, line_nr):
if not self._line_cache:
self._line_cache = self.source.splitlines()
if self.source:
if self.source[-1] == '\n':
self._line_cache.append('')
else: # ''.splitlines() == []
self._line_cache = ['']
if line_nr == 0:
# This is a fix for the zeroth line. We need a newline there, for
# the backwards parser.
return ''
if line_nr < 0:
raise StopIteration()
try:
return self._line_cache[line_nr - 1]
except IndexError:
raise StopIteration()
def get_position_line(self):
return self.get_line(self.position[0])[:self.position[1]]
def get_sys_path():
def check_virtual_env(sys_path):
""" Add virtualenv's site-packages to the `sys.path`."""
venv = os.getenv('VIRTUAL_ENV')
if not venv:
return
venv = os.path.abspath(venv)
p = os.path.join(
venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
sys_path.insert(0, p)
check_virtual_env(sys.path)
return [p for p in sys.path if p != ""]
@cache.memoize_default([])
def sys_path_with_modifications(module):
def execute_code(code):
c = "import os; from os.path import *; result=%s"
variables = {'__file__': module.path}
try:
exec_function(c % code, variables)
except Exception:
debug.warning('sys path detected, but failed to evaluate')
return None
try:
res = variables['result']
if isinstance(res, str):
return os.path.abspath(res)
else:
return None
except KeyError:
return None
def check_module(module):
try:
possible_stmts = module.used_names['path']
except KeyError:
return get_sys_path()
sys_path = list(get_sys_path()) # copy
for p in possible_stmts:
if not isinstance(p, pr.Statement):
continue
commands = p.get_commands()
if len(commands) != 1: # sys.path command is just one thing.
continue
call = commands[0]
n = call.name
if not isinstance(n, pr.Name) or len(n.names) != 3:
continue
if n.names[:2] != ('sys', 'path'):
continue
array_cmd = n.names[2]
if call.execution is None:
continue
exe = call.execution
if not (array_cmd == 'insert' and len(exe) == 2
or array_cmd == 'append' and len(exe) == 1):
continue
if array_cmd == 'insert':
exe_type, exe.type = exe.type, pr.Array.NOARRAY
exe_pop = exe.values.pop(0)
res = execute_code(exe.get_code())
if res is not None:
sys_path.insert(0, res)
debug.dbg('sys path inserted: %s' % res)
exe.type = exe_type
exe.values.insert(0, exe_pop)
elif array_cmd == 'append':
res = execute_code(exe.get_code())
if res is not None:
sys_path.append(res)
debug.dbg('sys path added: %s' % res)
return sys_path
if module.path is None:
# Support for modules without a path is bad, therefore return the
# normal path.
return list(get_sys_path())
curdir = os.path.abspath(os.curdir)
with common.ignored(OSError):
os.chdir(os.path.dirname(module.path))
result = check_module(module)
result += detect_django_path(module.path)
# cleanup, back to old directory
os.chdir(curdir)
return result
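# Descriptive note (added): check_module() above scans the parsed module for
# statements of the form ``sys.path.insert(0, <expr>)`` or
# ``sys.path.append(<expr>)`` and evaluates <expr> via execute_code(), so that
# import roots a project adds at runtime are also searched.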
def detect_django_path(module_path):
""" Detects the path of the very well known Django library (if used) """
result = []
while True:
new = os.path.dirname(module_path)
# If the module_path doesn't change anymore, we're finished -> /
if new == module_path:
break
else:
module_path = new
with common.ignored(IOError):
with open(module_path + os.path.sep + 'manage.py'):
debug.dbg('Found django path: %s' % module_path)
result.append(module_path)
return result
def source_to_unicode(source, encoding=None):
def detect_encoding():
""" For the implementation of encoding definitions in Python, look at:
http://www.python.org/dev/peps/pep-0263/
http://docs.python.org/2/reference/lexical_analysis.html#encoding-\
declarations
"""
byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
if source.startswith(byte_mark):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0)
possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
return possible_encoding.group(1)
else:
# the default if nothing else has been set -> PEP 263
return encoding if encoding is not None else 'iso-8859-1'
if isinstance(source, unicode):
# only cast str/bytes
return source
# cast to unicode by default
return unicode(source, detect_encoding(), 'replace')
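# Descriptive note (added): detect_encoding() above follows PEP 263, so a file
# that starts with a UTF-8 byte-order mark or declares its encoding in the
# first two lines, e.g.
#
#     # -*- coding: utf-8 -*-
#
# is decoded accordingly; otherwise the fallback (the supplied `encoding` or
# iso-8859-1) is used.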
|
mit
| 2,451,655,936,304,339,000 | 5,534,351,418,652,305,000 | 34.16307 | 79 | 0.528882 | false |
lanen/youtube-dl
|
test/test_utils.py
|
34
|
31908
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
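# Descriptive note (added): the sys.path tweak above puts the repository root
# in front of any installed copy, so running this file directly exercises the
# local youtube_dl package rather than a system-wide installation.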
# Various small unit tests
import io
import json
import xml.etree.ElementTree
from youtube_dl.utils import (
age_restricted,
args_to_str,
clean_html,
DateRange,
detect_exe_version,
encodeFilename,
escape_rfc3986,
escape_url,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
InAdvancePagedList,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
OnDemandPagedList,
orderedSet,
parse_duration,
parse_filesize,
parse_iso8601,
read_batch_urls,
sanitize_filename,
sanitize_path,
prepend_extension,
replace_extension,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
struct_unpack,
timeconvert,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
urlencode_postdata,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
self.assertEqual(
sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34')
self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'a\xe4b\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
if sys.platform != 'win32':
return
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
self.assertEqual(sanitize_path('abc|def'), 'abc#def')
self.assertEqual(sanitize_path('<>:"|?*'), '#######')
self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(
sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
self.assertEqual(
sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
self.assertEqual(sanitize_path('../abc'), '..\\abc')
self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_prepend_extension(self):
self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
def test_replace_extension(self):
self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
# keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(
unescapeHTML('é'), 'é')
def test_daterange(self):
_20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
_firstmilenium = DateRange(end="10000101")
self.assertTrue("07110427" in _firstmilenium)
def test_unified_dates(self):
self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
self.assertEqual(unified_strdate('8/7/2009'), '20090708')
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968 12 10'), '19681210')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
self.assertEqual(
unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
'20141126')
self.assertEqual(
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
'20150202')
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
def test_find_xpath_attr(self):
testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
doc = xml.etree.ElementTree.Element('root')
div = xml.etree.ElementTree.SubElement(doc, 'div')
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
def test_xpath_text(self):
testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
self.assertEqual(url, unsmug_url)
self.assertEqual(data, unsmug_data)
res_url, res_data = unsmuggle_url(url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, None)
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration(False), None)
self.assertEqual(parse_duration('invalid'), None)
self.assertEqual(parse_duration('1'), 1)
self.assertEqual(parse_duration('1337:12'), 80232)
self.assertEqual(parse_duration('9:12:43'), 33163)
self.assertEqual(parse_duration('12:00'), 720)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
self.assertEqual(parse_duration('5 s'), 5)
self.assertEqual(parse_duration('3 min'), 180)
self.assertEqual(parse_duration('2.5 hours'), 9000)
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
self.assertEqual(parse_duration('87 Min.'), 5220)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
def test_paged_list(self):
def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
for i in range(firstid, upto):
yield i
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
def test_struct_unpack(self):
self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
def test_read_batch_urls(self):
f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': '[email protected]', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_lowercase_escape(self):
self.assertEqual(lowercase_escape('aä'), 'aä')
self.assertEqual(lowercase_escape('\\u0026'), '&')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
# Ignore JavaScript code as well
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\''
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
self.assertFalse(age_restricted(None, 10)) # unrestricted content
self.assertFalse(age_restricted(1, None)) # unrestricted policy
self.assertFalse(age_restricted(8, 10))
self.assertTrue(age_restricted(18, 14))
self.assertFalse(age_restricted(18, 18))
def test_is_html(self):
self.assertFalse(is_html(b'\x49\x44\x43<html'))
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-8 with BOM
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-16-LE
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
))
self.assertTrue(is_html( # UTF-16-BE
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
))
self.assertTrue(is_html( # UTF-32-BE
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
self.assertTrue(is_html( # UTF-32-LE
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
def test_match_str(self):
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
self.assertFalse(match_str('xy', {'x': 1200}))
self.assertTrue(match_str('!xy', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 1200}))
self.assertFalse(match_str('!x', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 0}))
self.assertFalse(match_str('x>0', {'x': 0}))
self.assertFalse(match_str('x>0', {}))
self.assertTrue(match_str('x>?0', {}))
self.assertTrue(match_str('x>1K', {'x': 1200}))
self.assertFalse(match_str('x>2K', {'x': 1200}))
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 90, 'description': 'foo'}))
self.assertTrue(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), 0.0)
self.assertEqual(parse_dfxp_time_expr(''), 0.0)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
</div>
</body>
</tt>'''
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
def test_cli_option(self):
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
def test_cli_valueless_option(self):
self.assertEqual(cli_valueless_option(
{'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
self.assertEqual(cli_valueless_option(
{'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
['--no-check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
['--no-check-certificate=true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=true'])
if __name__ == '__main__':
unittest.main()
|
unlicense
| -5,701,131,007,354,605,000 | -8,336,766,864,095,941,000 | 42.767538 | 157 | 0.585688 | false |
tempbottle/ironpython3
|
Src/StdLib/Lib/test/test_univnewlines.py
|
130
|
3922
|
# Tests universal newline support for both reading and parsing files.
import io
import _pyio as pyio
import unittest
import os
import sys
from test import support
if not hasattr(sys.stdin, 'newlines'):
raise unittest.SkipTest(
"This Python does not have universal newline support")
FATX = 'x' * (2**14)
DATA_TEMPLATE = [
"line1=1",
"line2='this is a very long line designed to go past any default " +
"buffer limits that exist in io.py but we also want to test " +
"the uncommon case, naturally.'",
"def line3():pass",
"line4 = '%s'" % FATX,
]
DATA_LF = "\n".join(DATA_TEMPLATE) + "\n"
DATA_CR = "\r".join(DATA_TEMPLATE) + "\r"
DATA_CRLF = "\r\n".join(DATA_TEMPLATE) + "\r\n"
# Note that DATA_MIXED also tests the ability to recognize a lone \r
# before end-of-file.
DATA_MIXED = "\n".join(DATA_TEMPLATE) + "\r"
DATA_SPLIT = [x + "\n" for x in DATA_TEMPLATE]
class CTest:
open = io.open
class PyTest:
open = staticmethod(pyio.open)
class TestGenericUnivNewlines:
# use a class variable DATA to define the data to write to the file
# and a class variable NEWLINE to set the expected newlines value
READMODE = 'r'
WRITEMODE = 'wb'
def setUp(self):
data = self.DATA
if "b" in self.WRITEMODE:
data = data.encode("ascii")
with self.open(support.TESTFN, self.WRITEMODE) as fp:
fp.write(data)
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def test_read(self):
with self.open(support.TESTFN, self.READMODE) as fp:
data = fp.read()
self.assertEqual(data, DATA_LF)
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
def test_readlines(self):
with self.open(support.TESTFN, self.READMODE) as fp:
data = fp.readlines()
self.assertEqual(data, DATA_SPLIT)
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
def test_readline(self):
with self.open(support.TESTFN, self.READMODE) as fp:
data = []
d = fp.readline()
while d:
data.append(d)
d = fp.readline()
self.assertEqual(data, DATA_SPLIT)
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
def test_seek(self):
with self.open(support.TESTFN, self.READMODE) as fp:
fp.readline()
pos = fp.tell()
data = fp.readlines()
self.assertEqual(data, DATA_SPLIT[1:])
fp.seek(pos)
data = fp.readlines()
self.assertEqual(data, DATA_SPLIT[1:])
class TestCRNewlines(TestGenericUnivNewlines):
NEWLINE = '\r'
DATA = DATA_CR
class CTestCRNewlines(CTest, TestCRNewlines, unittest.TestCase): pass
class PyTestCRNewlines(PyTest, TestCRNewlines, unittest.TestCase): pass
class TestLFNewlines(TestGenericUnivNewlines):
NEWLINE = '\n'
DATA = DATA_LF
class CTestLFNewlines(CTest, TestLFNewlines, unittest.TestCase): pass
class PyTestLFNewlines(PyTest, TestLFNewlines, unittest.TestCase): pass
class TestCRLFNewlines(TestGenericUnivNewlines):
NEWLINE = '\r\n'
DATA = DATA_CRLF
def test_tell(self):
with self.open(support.TESTFN, self.READMODE) as fp:
self.assertEqual(repr(fp.newlines), repr(None))
data = fp.readline()
pos = fp.tell()
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
class CTestCRLFNewlines(CTest, TestCRLFNewlines, unittest.TestCase): pass
class PyTestCRLFNewlines(PyTest, TestCRLFNewlines, unittest.TestCase): pass
class TestMixedNewlines(TestGenericUnivNewlines):
NEWLINE = ('\r', '\n')
DATA = DATA_MIXED
class CTestMixedNewlines(CTest, TestMixedNewlines, unittest.TestCase): pass
class PyTestMixedNewlines(PyTest, TestMixedNewlines, unittest.TestCase): pass
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 4,948,575,519,527,223,000 | 2,633,526,606,841,469,000 | 30.886179 | 77 | 0.644824 | false |
elysium001/zamboni
|
scripts/gaia_package.py
|
18
|
1605
|
"""
Gaia Marketplace helper. Takes packaged manifest as only argument.
Copy this into the Marketplace app folder (dev/stage/prod whatever), and run.
Fetches Marketplace package and Etags (e.g., m.f.c/packaged.webapp).
Downloads application.zip for you.
If metadata.json is in path, replaces the appropriate fields.
>> python gaia_package.py https://marketplace.firefox.com/packaged.webapp
And you're done!
"""
import json
import os
import requests
import sys
try:
manifest_url = sys.argv[1]
if not manifest_url.startswith('http'):
raise
except:
print "Please give a valid manifest (e.g., m.f.c/packaged.webapp)."
sys.exit(0)
r = requests.get(manifest_url)
package_path = json.loads(r.content)['package_path']
etag = r.headers['etag'].replace('"', '')
print "Downloading package"
r = requests.get(package_path)
package = r.content
package_etag = r.headers['etag'].replace('"', '')
f = open('application.zip', 'wb')
f.write(package)
print "Package path: %s" % package_path
print "Etag: %s" % etag
print "Package Etag: %s" % package_etag
filename = 'metadata.json'
try:
    f = open(filename, 'r')
except:
sys.exit(0)
print "Updating metadata.json"
tmp_filename = 'metadata.json.tmp'
tmp_f = open(tmp_filename, 'w')
for line in f:
if '"etag"' in line:
line = r'%s%s%s' % (line[0:13], etag, line[-5:])
line = line.replace(r'\\', r'\\\\')
elif '"packageEtag"' in line:
line = r'%s%s%s' % (line[0:20], package_etag, line[-5:])
line = line.replace(r'\\', r'\\\\')
tmp_f.write(line)
tmp_f.close()
os.rename(tmp_filename, filename)
|
bsd-3-clause
| 1,577,594,039,898,212,000 | -5,659,738,339,819,758,000 | 25.75 | 77 | 0.661682 | false |
francisco-dlp/hyperspy
|
hyperspy/drawing/utils.py
|
1
|
57321
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backend_bases import key_press_handler
import warnings
import numpy as np
from distutils.version import LooseVersion
import logging
import hyperspy as hs
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar, None
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
percentiles. It must be in the [0, 100] range. If None, set the value
to 0.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if saturated_pixels is None:
saturated_pixels = 0
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
vmin = np.nanpercentile(data, saturated_pixels / 2.)
vmax = np.nanpercentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
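# Illustrative sketch, not part of the original module: how contrast_stretching
# turns a saturated_pixels percentage into percentile bounds. The array values
# below are made up purely for demonstration.
def _contrast_stretching_demo():
    data = np.array([0., 1., 2., 3., 100.])
    # With saturated_pixels=40, the lowest and highest 20% of the values are
    # left outside the bounds, i.e. the 20th and 80th percentiles are returned.
    vmin, vmax = contrast_stretching(data, 40)
    return vmin, vmax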
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
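# Illustrative sketch, not part of the original module: centring the colour
# range around zero, which is what the plotting code does for diverging
# colormaps.
def _centre_colormap_demo():
    # (-3, 10) becomes (-10, 10): the larger absolute bound is used on both sides.
    return centre_colormap_values(-3, 10)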
def create_figure(window_title=None,
_on_figure_window_close=None,
disable_xyscale_keys=False,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
disable_xyscale_keys : bool, disable the `k`, `l` and `L` shortcuts which
toggle the x or y axis between linear and log scale.
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
# remove non-alphanumeric characters to prevent file saving problems
# This is a workaround for:
# https://github.com/matplotlib/matplotlib/issues/9056
reserved_characters = r'<>"/\|?*'
for c in reserved_characters:
window_title = window_title.replace(c, '')
window_title = window_title.replace('\n', ' ')
window_title = window_title.replace(':', ' -')
fig.canvas.set_window_title(window_title)
if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'):
# hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts
manager = fig.canvas.manager
fig.canvas.mpl_disconnect(manager.key_press_handler_id)
manager.key_press_handler_id = manager.canvas.mpl_connect(
'key_press_event',
lambda event: key_press_handler_custom(event, manager.canvas))
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
def key_press_handler_custom(event, canvas):
if event.key not in ['k', 'l', 'L']:
key_press_handler(event, canvas, canvas.manager.toolbar)
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
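# Illustrative sketch, not part of the original module: ColorCycle hands out the
# seven matplotlib base colours as RGBA tuples and silently restarts once the
# cycle is exhausted.
def _color_cycle_demo(n=10):
    cycle = ColorCycle()
    return [cycle() for _ in range(n)]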
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default "True"
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator == "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def set_xaxis_lims(mpl_ax, hs_axis):
"""
Set the matplotlib axis limits to match that of a HyperSpy axis
Parameters
----------
mpl_ax : :class:`matplotlib.axis.Axis`
The ``matplotlib`` axis to change
hs_axis : :class:`~hyperspy.axes.DataAxis`
The data axis that contains the values that control the scaling
"""
x_axis_lower_lim = hs_axis.axis[0]
x_axis_upper_lim = hs_axis.axis[-1]
mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim)
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def _transpose_if_required(signal, expected_dimension):
# EDS profiles or maps have signal dimension = 0 and navigation dimension
# 1 or 2. For convenience transpose the signal if possible
if (signal.axes_manager.signal_dimension == 0 and
signal.axes_manager.navigation_dimension == expected_dimension):
return signal.T
else:
return signal
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
images : list of Signal2D or BaseSignal
`images` should be a list of Signals to plot. For `BaseSignal` with
navigation dimensions 2 and signal dimension 0, the signal will be
        transposed to form a `Signal2D`.
Multi-dimensional images will have each plane plotted as a separate
image.
If any signal shape is not suitable, a ValueError will be raised.
cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional*
The colormap used for the images, by default read from ``pyplot``.
A list of colormaps can also be provided, and the images will
cycle through them. Optionally, the value ``'mpl_colors'`` will
cause the cmap to loop through the default ``matplotlib``
colors (to match with the default output of the
        :py:func:`~.drawing.utils.plot_spectra` method).
Note: if using more than one colormap, using the ``'single'``
option for ``colorbar`` is disallowed.
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
    saturated_pixels: None, scalar or list of scalar, optional, default: 0
        If a list of scalars, the length should match the number of images
        to show. If None is provided in the list, the value is set to 0.
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
        values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
"""
def __check_single_colorbar(cbar):
if cbar == 'single':
raise ValueError('Cannot use a single colorbar with multiple '
'colormaps. Please check for compatible '
'arguments.')
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
# Check that we have a hyperspy signal
im = [images] if not isinstance(images, (list, tuple)) else images
for image in im:
if not isinstance(image, BaseSignal):
raise ValueError("`images` must be a list of image signals or a "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# For list of EDS maps, transpose the BaseSignal
if isinstance(images, (list, tuple)):
images = [_transpose_if_required(image, 2) for image in images]
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
# If no cmap given, get default colormap from pyplot:
if cmap is None:
cmap = [plt.get_cmap().name]
elif cmap == 'mpl_colors':
for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']):
make_cmap(colors=['#000000', c['color']],
name='mpl{}'.format(n_color))
cmap = ['mpl{}'.format(i) for i in
range(len(mpl.rcParams['axes.prop_cycle']))]
__check_single_colorbar(colorbar)
# cmap is list, tuple, or something else iterable (but not string):
elif hasattr(cmap, '__iter__') and not isinstance(cmap, str):
try:
cmap = [c.name for c in cmap] # convert colormap to string
except AttributeError:
cmap = [c for c in cmap] # c should be string if not colormap
__check_single_colorbar(colorbar)
elif isinstance(cmap, mpl.colors.Colormap):
cmap = [cmap.name] # convert single colormap to list with string
elif isinstance(cmap, str):
cmap = [cmap] # cmap is single string, so make it a list
else:
# Didn't understand cmap input, so raise error
raise ValueError('The provided cmap value was not understood. Please '
'check input values.')
# If any of the cmaps given are diverging, and auto-centering, set the
# appropriate flag:
if centre_colormap == "auto":
centre_colormaps = []
for c in cmap:
if c in MPL_DIVERGING_COLORMAPS:
centre_colormaps.append(True)
else:
centre_colormaps.append(False)
# if it was True, just convert to list
elif centre_colormap:
centre_colormaps = [True]
# likewise for false
elif not centre_colormap:
centre_colormaps = [False]
# finally, convert lists to cycle generators for adaptive length:
centre_colormaps = itertools.cycle(centre_colormaps)
cmap = itertools.cycle(cmap)
def _check_arg(arg, default_value, arg_name):
if isinstance(arg, list):
if len(arg) != n:
_logger.warning('The provided {} values are ignored because the '
'length of the list does not match the number of '
'images'.format(arg_name))
arg = [default_value] * n
else:
arg = [arg] * n
return arg
vmin = _check_arg(vmin, None, 'vmin')
vmax = _check_arg(vmax, None, 'vmax')
saturated_pixels = _check_arg(saturated_pixels, 0, 'saturated_pixels')
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
if colorbar == 'single':
# get a g_saturated_pixels from saturated_pixels
if isinstance(saturated_pixels, list):
g_saturated_pixels = min(np.array([v for v in saturated_pixels]))
else:
g_saturated_pixels = saturated_pixels
# estimate a g_vmin and g_max from saturated_pixels
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), g_saturated_pixels)
# if vmin and vmax are provided, override g_min and g_max
if isinstance(vmin, list):
            _logger.warning('vmin has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
            _logger.warning('vmax has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if next(centre_colormaps):
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Replot: create a list to store references to the images
replot_ims = []
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
ax = f.add_subplot(rows, per_row, idx + 1)
axes_list.append(ax)
data = im.data
centre = next(centre_colormaps) # get next value for centreing
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(
data, saturated_pixels[idx])
l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin
l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax
if centre:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
_logger.warning("Did not understand aspect ratio input. "
"Using 'auto' as default.")
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Get colormap for this image:
cm = next(cmap)
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
# If an axis trait is undefined, shut off :
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
if axes_decor == 'all':
_logger.warning(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx]
elif user_labels:
title = label_list[idx]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# Replot: store references to the images
replot_ims.append(im)
idx += 1
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
# Replot: connect function
def on_dblclick(event):
# On the event of a double click, replot the selected subplot
if not event.inaxes:
return
if not event.dblclick:
return
subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)]
inx = list(subplots).index(event.inaxes)
im = replot_ims[inx]
# Use some of the info in the subplot
cm = subplots[inx].images[0].get_cmap()
clim = subplots[inx].images[0].get_clim()
sbar = False
if (scalelist and inx in scalebar) or scalebar == 'all':
sbar = True
im.plot(colorbar=bool(colorbar),
vmin=clim[0],
vmax=clim[1],
no_nans=no_nans,
aspect=asp,
scalebar=sbar,
scalebar_color=scalebar_color,
cmap=cm)
f.canvas.mpl_connect('button_press_event', on_dblclick)
return axes_list
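# Illustrative sketch, not part of the original module: a minimal call to
# plot_images with two throw-away random images sharing a single colorbar.
# Signal2D and the random data here are stand-ins for real image signals.
def _plot_images_demo():
    from hyperspy.signals import Signal2D
    s1 = Signal2D(np.random.random((32, 32)))
    s2 = Signal2D(np.random.random((32, 32)))
    # axes_decor='ticks' avoids the axis-label warning for signals whose axes
    # have no name or units defined.
    return plot_images([s1, s2], colorbar='single', axes_decor='ticks',
                       tight_layout=True)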
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def make_cmap(colors, name='my_colormap', position=None,
bit=False, register=True):
"""
Create a matplotlib colormap with customized colors, optionally registering
it with matplotlib for simplified use.
Adapted from Chris Slocum's code at:
https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py
and used under the terms of that code's BSD-3 license
Parameters
----------
colors : iterable
list of either tuples containing rgb values, or html strings
Colors should be arranged so that the first color is the lowest
value for the colorbar and the last is the highest.
name : str
name of colormap to use when registering with matplotlib
position : None or iterable
list containing the values (from [0,1]) that dictate the position
of each color within the colormap. If None (default), the colors
will be equally-spaced within the colorbar.
bit : boolean
True if RGB colors are given in 8-bit [0 to 255] or False if given
in arithmetic basis [0 to 1] (default)
register : boolean
switch to control whether or not to register the custom colormap
with matplotlib in order to enable use by just the name string
"""
def _html_color_to_rgb(color_string):
""" convert #RRGGBB to an (R, G, B) tuple """
color_string = color_string.strip()
if color_string[0] == '#':
color_string = color_string[1:]
if len(color_string) != 6:
raise ValueError(
"input #{} is not in #RRGGBB format".format(color_string))
r, g, b = color_string[:2], color_string[2:4], color_string[4:]
r, g, b = [int(n, 16) / 255 for n in (r, g, b)]
return r, g, b
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise ValueError("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise ValueError("position must start with 0 and end with 1")
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(position, colors):
if isinstance(color, str):
color = _html_color_to_rgb(color)
elif bit:
color = (bit_rgb[color[0]],
bit_rgb[color[1]],
bit_rgb[color[2]])
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256)
if register:
mpl.cm.register_cmap(name, cmap)
return cmap
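# Illustrative sketch, not part of the original module: building and
# registering a two-colour map from HTML strings with make_cmap. The name
# 'demo_black_red' is arbitrary.
def _make_cmap_demo():
    cmap = make_cmap(['#000000', '#ff0000'], name='demo_black_red')
    # Because register=True by default, the map can afterwards be requested by
    # name, e.g. plt.get_cmap('demo_black_red').
    return cmap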
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : list of Signal1D or BaseSignal
Ordered spectra list of signal to plot. If `style` is "cascade" or
"mosaic" the spectra can have different size and axes. For `BaseSignal`
with navigation dimensions 1 and signal dimension 0, the signal will be
        transposed to form a `Signal1D`.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
        If a list, and its length is less than the number of spectra to plot,
        the colors will be cycled. If `None`, use the default matplotlib
        color cycle.
line_style: matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
        The main line styles are '-', '--', 'steps', '-.', ':'.
        If a list, and its length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, e.g. ('-', '--', 'steps', '-.', ':').
    padding : float, optional, default 1.0
        Option for "cascade". 1 guarantees that there is no overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
    legend: None or list of str or 'auto'
        If a list of strings, the legend for "cascade" or the title for
        "mosaic" is displayed. If 'auto', the title of each spectrum
        (metadata.General.title) is used.
    legend_picking: bool
        If True, a spectrum can be toggled on and off by clicking on
        its legend line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
styles
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
    # Before v1.3 the default would read the value from preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
ax.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(fig=fig, ax=ax)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
spectrum = _transpose_if_required(spectrum, 1)
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = [_transpose_if_required(spectrum, 1) for spectrum in
spectra]
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(fig=None, ax=None):
"""Animate the legend of a figure.
    A spectrum can be toggled on and off by clicking on its legend entry.
Parameters
----------
fig: None | matplotlib.figure
        If None, pick the current figure using "plt.gcf".
    ax: None | matplotlib.axes
        If None, pick the current axes using "plt.gca".
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
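    Example
    -------
    A minimal sketch (assuming `spectra` is a list of signals already
    plotted by `plot_spectra`, with a legend attached to `ax`):
    >>> ax = plot_spectra(spectra, legend='auto', legend_picking=False)
    >>> animate_legend(fig=ax.figure, ax=ax)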
"""
if fig is None:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
lines = ax.lines[::-1]
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
if legline.axes == ax:
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('pick_event', onpick)
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
    This function creates a histogram for each signal and plots the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
the spectra can have different size and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
        'freedman' : use the Freedman-Diaconis rule to determine bins
        'blocks' : use Bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
        The minimum and maximum range for the histogram. If not specified,
        it will be (x.min(), x.max()).
color : valid matplotlib color or a list of them or `None`, optional.
        Sets the color of the lines of the plots. If a list whose length is
        less than the number of spectra to plot, the colors will be cycled.
        If `None`, the default matplotlib color cycle is used.
line_style: valid matplotlib line style or a list of them or `None`,
optional.
        The main line styles are '-', '--', 'steps', '-.' and ':'.
        If a list whose length is less than the number of spectra to plot,
        the line styles will be cycled. If `None`, 'steps' is used for all
        histograms.
legend: None or list of str or 'auto', optional.
        Display a legend. If 'auto', the title of each spectrum
(metadata.General.title) is used.
legend_picking: bool, optional.
        If True, a spectrum can be toggled on and off by clicking on
        its legend entry.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
        Other keyword arguments (weights and density) are described in
np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
|
gpl-3.0
| -7,764,825,860,367,261,000 | 8,985,263,166,371,552,000 | 36.986083 | 82 | 0.581131 | false |
rg3915/django-experience
|
djexperience/settings.py
|
1
|
3763
|
import os
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())
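# The settings above are read from the environment via python-decouple.
# A hypothetical .env file (illustrative only, not shipped with this repo)
# could look like:
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,.localhost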
# Application definition
INSTALLED_APPS = [
# my apps
'djexperience.core',
# default django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    # third-party apps
'django_extensions',
'bootstrapform',
'widget_tweaks',
'daterange_filter',
'django_activeurl',
'import_export',
'django_tables2',
# my apps
'djexperience.bookstore',
'djexperience.company',
'djexperience.crm',
'djexperience.myemail',
'djexperience.product',
'djexperience.selling',
'djexperience.service',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djexperience.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djexperience.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
DECIMAL_SEPARATOR = ','
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_URL = '/admin/login/'
|
mit
| 2,179,169,836,594,937,600 | 8,133,924,671,538,186,000 | 25.687943 | 91 | 0.687749 | false |
popazerty/obh-gui
|
lib/python/Components/RcModel.py
|
30
|
1436
|
import os
from Tools.HardwareInfo import HardwareInfo
from Tools.Directories import SCOPE_SKIN, resolveFilename
class RcModel:
RcModels = {}
def __init__(self):
self.model = HardwareInfo().get_device_model()
		# cfg file has modelname rcname entries.
# modelname is boxname optionally followed by .rctype
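		# e.g. a hypothetical line "dm8000 dmm" would map model dm8000 to the dmm remote files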
for line in open((resolveFilename(SCOPE_SKIN, 'rc_models/rc_models.cfg')), 'r'):
if line.startswith(self.model):
m, r = line.strip().split()
self.RcModels[m] = r
def rcIsDefault(self):
# Default RC can only happen with DMM type remote controls...
return self.model.startswith('dm')
def getRcFile(self, ext):
# check for rc/type every time so rctype changes will be noticed
if os.path.exists('/proc/stb/ir/rc/type'):
rc = open('/proc/stb/ir/rc/type').read().strip()
modeltype = '%s.%s' % (self.model, rc)
else:
modeltype = None
if modeltype is not None and modeltype in self.RcModels.keys():
remote = self.RcModels[modeltype]
elif self.model in self.RcModels.keys():
remote = self.RcModels[self.model]
else:
remote = 'dmm' # default. Assume files for dmm exists
f = resolveFilename(SCOPE_SKIN, 'rc_models/' + remote + '.' + ext)
if not os.path.exists(f):
f = resolveFilename(SCOPE_SKIN, 'rc_models/dmm.' + ext)
return f
def getRcImg(self):
return self.getRcFile('png')
def getRcPositions(self):
return self.getRcFile('xml')
rc_model = RcModel()
|
gpl-2.0
| -3,724,443,730,721,428,500 | -5,892,664,297,687,208,000 | 30.217391 | 82 | 0.689415 | false |
tgroh/incubator-beam
|
sdks/python/apache_beam/examples/complete/juliaset/setup.py
|
4
|
4732
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup.py module for the workflow's worker utilities.
All the workflow related code is gathered in a package that will be built as a
source distribution, staged in the staging area for the workflow being run and
then installed in the workers when they start running.
This behavior is triggered by specifying the --setup_file command line option
when running the workflow for remote execution.
"""
from __future__ import print_function
import subprocess
from distutils.command.build import build as _build
import setuptools
# This class handles the pip install mechanism.
class build(_build): # pylint: disable=invalid-name
"""A build command class that will be invoked during package install.
The package built using the current setup.py will be staged and later
installed in the worker using `pip install package'. This class will be
instantiated during install for this specific scenario and will trigger
running the custom commands specified.
"""
sub_commands = _build.sub_commands + [('CustomCommands', None)]
# Some custom command to run during setup. The command is not essential for this
# workflow. It is used here as an example. Each command will spawn a child
# process. Typically, these commands will include steps to install non-Python
# packages. For instance, to install a C++-based library libjpeg62 the following
# two commands will have to be added:
#
# ['apt-get', 'update'],
# ['apt-get', '--assume-yes', 'install', 'libjpeg62'],
#
# First, note that there is no need to use the sudo command because the setup
# script runs with appropriate access.
# Second, if apt-get tool is used then the first command needs to be 'apt-get
# update' so the tool refreshes itself and initializes links to download
# repositories. Without this initial step the other apt-get install commands
# will fail with package not found errors. Note also --assume-yes option which
# shortcuts the interactive confirmation.
#
# Note that in this example custom commands will run after installing required
# packages. If you have a PyPI package that depends on one of the custom
# commands, move installation of the dependent package to the list of custom
# commands, e.g.:
#
# ['pip', 'install', 'my_package'],
#
# TODO(BEAM-3237): Output from the custom commands are missing from the logs.
# The output of custom commands (including failures) will be logged in the
# worker-startup log.
CUSTOM_COMMANDS = [
['echo', 'Custom command worked!']]
class CustomCommands(setuptools.Command):
"""A setuptools Command class able to run arbitrary commands."""
def initialize_options(self):
pass
def finalize_options(self):
pass
def RunCustomCommand(self, command_list):
print('Running command: %s' % command_list)
p = subprocess.Popen(
command_list,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Can use communicate(input='y\n'.encode()) if the command run requires
# some confirmation.
stdout_data, _ = p.communicate()
print('Command output: %s' % stdout_data)
if p.returncode != 0:
raise RuntimeError(
'Command %s failed: exit code: %s' % (command_list, p.returncode))
def run(self):
for command in CUSTOM_COMMANDS:
self.RunCustomCommand(command)
# Configure the required packages and scripts to install.
# Note that the Python Dataflow containers come with numpy already installed
# so this dependency will not trigger anything to be installed unless a version
# restriction is specified.
REQUIRED_PACKAGES = [
'numpy',
]
setuptools.setup(
name='juliaset',
version='0.0.1',
description='Julia set workflow package.',
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
cmdclass={
# Command class instantiated and run during pip install scenarios.
'build': build,
'CustomCommands': CustomCommands,
}
)
|
apache-2.0
| -5,002,040,103,115,887,000 | -2,889,856,709,085,528,600 | 36.856 | 80 | 0.73732 | false |
linegpe/FYS3150
|
Project4/expect_random_T1.py
|
1
|
3161
|
import numpy as np
import matplotlib.pyplot as plt
data1 = np.loadtxt("expect_random_T1.00.dat")
data2 = np.loadtxt("expect_ordered_T1.00.dat")
data3 = np.loadtxt("expect_random2_T2.40.dat")
data4 = np.loadtxt("expect_ordered2_T2.40.dat")
values1 = data1[0::1]
values2 = data2[0::1]
values3 = data3[0::1]
values4 = data4[0::1]
N1 = len(values1)
x1 = np.linspace(0,N1,N1)
N2 = len(values3)
x2 = np.linspace(0,N2,N2)
figure1 = plt.figure()
labels = figure1.add_subplot(111)
# Turn off axis lines and ticks of the big subplot
labels.spines['top'].set_color('none')
labels.spines['bottom'].set_color('none')
labels.spines['left'].set_color('none')
labels.spines['right'].set_color('none')
labels.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.ylabel("Mean energy per spin",fontsize=15)
#figure1.yaxis.set_ticks_position(right)
#figure1.ylabel.set_ticks_position('left')
#figure1.yaxis.tick_right()
fig1 = figure1.add_subplot(211)
fig1.plot(x1,values1[:,0],label="Random initial spins, T=1")
fig1.plot(x1,values2[:,0],label="Ordered initial spins, T=1")
fig1.tick_params(axis='x', labelsize=15) #HOW TO PUT THIS ON THE RIGHT SIDE?
fig1.tick_params(axis='y', labelsize=15)
fig1.yaxis.tick_right()
#plt.ylabel(r"$\langle E\rangle /L^2$",fontsize=17)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,N1,-3,0])
#plt.show()
fig2 = figure1.add_subplot(212)
fig2.plot(x2,values3[:,0],label="Random initial spins, T=2.4")
fig2.plot(x2,values4[:,0],label="Ordered initial spins, T=2.4")
fig2.tick_params(axis='x', labelsize=15)
fig2.tick_params(axis='y', labelsize=15)
fig2.yaxis.tick_right()
#plt.ylabel(r"$\langle E\rangle /L^2$",fontsize=15)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,50000,-2,-0.4])
plt.show()
figure2 = plt.figure()
labels = figure2.add_subplot(111)
labels.spines['top'].set_color('none')
labels.spines['bottom'].set_color('none')
labels.spines['left'].set_color('none')
labels.spines['right'].set_color('none')
labels.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.ylabel("Absolute magnetization per spin",fontsize=15)
fig1 = figure2.add_subplot(211)
fig1.plot(x1,values1[:,1],label="Random initial spins, T=1")
fig1.plot(x1,values2[:,1],label="Ordered initial spins, T=1")
fig1.tick_params(axis='x', labelsize=15)
fig1.tick_params(axis='y', labelsize=15)
fig1.yaxis.tick_right()
#fig2.ylabel(r"$abs(\langle M \rangle /L^2)$",fontsize=15)
#fig2.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,N1,0.2,1.6])
#plt.show()
fig2 = figure2.add_subplot(212)
fig2.plot(x2,values3[:,1],label="Random initial spins, T=2.4")
fig2.plot(x2,values4[:,1],label="Ordered initial spins, T=2.4")
fig2.tick_params(axis='x', labelsize=15)
fig2.tick_params(axis='y', labelsize=15)
fig2.yaxis.tick_right()
#plt.ylabel(r"$abs(\langle M\rangle / L^2)$",fontsize=15)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
#plt.axis([0,8e6,-0.1,1.4])
plt.show()
|
gpl-3.0
| -2,577,601,754,759,302,700 | 3,015,066,548,794,050,600 | 28.830189 | 84 | 0.708637 | false |
shoyer/xray
|
xarray/backends/locks.py
|
1
|
5397
|
import multiprocessing
import threading
import weakref
from typing import Any, MutableMapping
try:
from dask.utils import SerializableLock
except ImportError:
# no need to worry about serializing the lock
SerializableLock = threading.Lock
try:
from dask.distributed import Lock as DistributedLock
except ImportError:
DistributedLock = None
# Locks used by multiple backends.
# Neither HDF5 nor the netCDF-C library is thread-safe.
HDF5_LOCK = SerializableLock()
NETCDFC_LOCK = SerializableLock()
_FILE_LOCKS = weakref.WeakValueDictionary() # type: MutableMapping[Any, threading.Lock] # noqa
def _get_threaded_lock(key):
try:
lock = _FILE_LOCKS[key]
except KeyError:
lock = _FILE_LOCKS[key] = threading.Lock()
return lock
def _get_multiprocessing_lock(key):
# TODO: make use of the key -- maybe use locket.py?
# https://github.com/mwilliamson/locket.py
del key # unused
return multiprocessing.Lock()
_LOCK_MAKERS = {
None: _get_threaded_lock,
'threaded': _get_threaded_lock,
'multiprocessing': _get_multiprocessing_lock,
'distributed': DistributedLock,
}
def _get_lock_maker(scheduler=None):
"""Returns an appropriate function for creating resource locks.
Parameters
----------
scheduler : str or None
Dask scheduler being used.
See Also
--------
dask.utils.get_scheduler_lock
"""
return _LOCK_MAKERS[scheduler]
def _get_scheduler(get=None, collection=None):
"""Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.base.get_scheduler
"""
try:
# dask 0.18.1 and later
from dask.base import get_scheduler
actual_get = get_scheduler(get, collection)
except ImportError:
try:
from dask.utils import effective_get
actual_get = effective_get(get, collection)
except ImportError:
return None
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return 'distributed'
except (ImportError, AttributeError):
try:
import dask.multiprocessing
if actual_get == dask.multiprocessing.get:
return 'multiprocessing'
else:
return 'threaded'
except ImportError:
return 'threaded'
def get_write_lock(key):
"""Get a scheduler appropriate lock for writing to the given resource.
Parameters
----------
key : str
Name of the resource for which to acquire a lock. Typically a filename.
Returns
-------
Lock object that can be used like a threading.Lock object.
"""
scheduler = _get_scheduler()
lock_maker = _get_lock_maker(scheduler)
return lock_maker(key)
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking)
class CombinedLock:
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, blocking=True):
return all(acquire(lock, blocking=blocking) for lock in self.locks)
def release(self):
for lock in self.locks:
lock.release()
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
def locked(self):
return any(lock.locked for lock in self.locks)
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class DummyLock:
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks):
"""Combine a sequence of locks into a single lock."""
all_locks = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock):
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
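def _example_locked_write(path):
    """Minimal usage sketch (not part of the public xarray API).
    Combines the global HDF5 lock with a scheduler-appropriate per-file
    lock for ``path`` and holds both around a write.
    """
    lock = combine_locks([HDF5_LOCK, get_write_lock(path)])
    with ensure_lock(lock):
        pass  # perform the write to ``path`` while both locks are held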
|
apache-2.0
| 6,204,892,188,123,310,000 | -4,545,297,892,530,319,400 | 24.578199 | 96 | 0.631277 | false |
vodik/pacman
|
test/pacman/pmpkg.py
|
2
|
7696
|
#! /usr/bin/python2
#
# Copyright (c) 2006 by Aurelien Foret <[email protected]>
# Copyright (c) 2006-2013 Pacman Development Team <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import stat
import shutil
from StringIO import StringIO
import tarfile
import util
class pmpkg(object):
"""Package object.
Object holding data from an Arch Linux package.
"""
def __init__(self, name, version = "1.0-1"):
self.path = "" #the path of the generated package
# desc
self.name = name
self.version = version
self.desc = ""
self.groups = []
self.url = ""
self.license = []
self.arch = ""
self.builddate = ""
self.installdate = ""
self.packager = ""
self.size = 0
self.csize = 0
self.isize = 0
self.reason = 0
self.md5sum = "" # sync only
self.pgpsig = "" # sync only
self.replaces = []
self.depends = []
self.optdepends = []
self.conflicts = []
self.provides = []
# files
self.files = []
self.backup = []
# install
self.install = {
"pre_install": "",
"post_install": "",
"pre_remove": "",
"post_remove": "",
"pre_upgrade": "",
"post_upgrade": "",
}
self.path = None
self.finalized = False
def __str__(self):
s = ["%s" % self.fullname()]
s.append("description: %s" % self.desc)
s.append("url: %s" % self.url)
s.append("files: %s" % " ".join(self.files))
s.append("reason: %d" % self.reason)
return "\n".join(s)
def fullname(self):
"""Long name of a package.
Returns a string formatted as follows: "pkgname-pkgver".
"""
return "%s-%s" % (self.name, self.version)
def filename(self):
"""File name of a package, including its extension.
Returns a string formatted as follows: "pkgname-pkgver.PKG_EXT_PKG".
"""
return "%s%s" % (self.fullname(), util.PM_EXT_PKG)
@staticmethod
def parse_filename(name):
filename = name
if filename[-1] == "*":
filename = filename.rstrip("*")
if filename.find(" -> ") != -1:
filename, extra = filename.split(" -> ")
elif filename.find("|") != -1:
filename, extra = filename.split("|")
return filename
def makepkg(self, path):
"""Creates an Arch Linux package archive.
A package archive is generated in the location 'path', based on the data
from the object.
"""
archive_files = []
# .PKGINFO
data = ["pkgname = %s" % self.name]
data.append("pkgver = %s" % self.version)
data.append("pkgdesc = %s" % self.desc)
data.append("url = %s" % self.url)
data.append("builddate = %s" % self.builddate)
data.append("packager = %s" % self.packager)
data.append("size = %s" % self.size)
if self.arch:
data.append("arch = %s" % self.arch)
for i in self.license:
data.append("license = %s" % i)
for i in self.replaces:
data.append("replaces = %s" % i)
for i in self.groups:
data.append("group = %s" % i)
for i in self.depends:
data.append("depend = %s" % i)
for i in self.optdepends:
data.append("optdepend = %s" % i)
for i in self.conflicts:
data.append("conflict = %s" % i)
for i in self.provides:
data.append("provides = %s" % i)
for i in self.backup:
data.append("backup = %s" % i)
archive_files.append((".PKGINFO", "\n".join(data)))
# .INSTALL
if any(self.install.values()):
archive_files.append((".INSTALL", self.installfile()))
self.path = os.path.join(path, self.filename())
util.mkdir(os.path.dirname(self.path))
# Generate package metadata
tar = tarfile.open(self.path, "w:gz")
for name, data in archive_files:
info = tarfile.TarInfo(name)
info.size = len(data)
tar.addfile(info, StringIO(data))
# Generate package file system
for name in self.files:
fileinfo = util.getfileinfo(name)
info = tarfile.TarInfo(fileinfo["filename"])
if fileinfo["hasperms"]:
info.mode = fileinfo["perms"]
elif fileinfo["isdir"]:
info.mode = 0o755
if fileinfo["isdir"]:
info.type = tarfile.DIRTYPE
tar.addfile(info)
elif fileinfo["islink"]:
info.type = tarfile.SYMTYPE
info.linkname = fileinfo["link"]
tar.addfile(info)
else:
# TODO wow what a hack, adding a newline to match mkfile?
filedata = name + "\n"
info.size = len(filedata)
tar.addfile(info, StringIO(filedata))
tar.close()
def install_package(self, root):
"""Install the package in the given root."""
for f in self.files:
util.mkfile(root, f, f)
path = os.path.join(root, f)
if os.path.isfile(path):
os.utime(path, (355, 355))
def filelist(self):
"""Generate a list of package files."""
return sorted([self.parse_filename(f) for f in self.files])
def finalize(self):
"""Perform any necessary operations to ready the package for use."""
if self.finalized:
return
# add missing parent dirs to file list
# use bare file names so trailing ' -> ', '*', etc don't throw off the
# checks for existing files
file_names = self.filelist()
for name in list(file_names):
if os.path.isabs(name):
raise ValueError("Absolute path in filelist '%s'." % name)
name = os.path.dirname(name.rstrip("/"))
while name:
if name in file_names:
# path exists as both a file and a directory
raise ValueError("Duplicate path in filelist '%s'." % name)
elif name + "/" in file_names:
# path was either manually included or already processed
break
else:
file_names.append(name + "/")
self.files.append(name + "/")
name = os.path.dirname(name)
self.files.sort()
self.finalized = True
def local_backup_entries(self):
return ["%s\t%s" % (self.parse_filename(i), util.mkmd5sum(i)) for i in self.backup]
def installfile(self):
data = []
for key, value in self.install.items():
if value:
data.append("%s() {\n%s\n}\n" % (key, value))
return "\n".join(data)
# vim: set ts=4 sw=4 et:
|
gpl-2.0
| 3,345,720,038,771,347,500 | 140,395,266,587,255,740 | 32.172414 | 91 | 0.537032 | false |
notifico/notifico
|
notifico/services/messages.py
|
3
|
1916
|
# -*- coding: utf8 -*-
__all__ = ('MessageService',)
import json
class MessageService(object):
#: Key name for the outgoing message queue.
key_queue_messages = 'queue_message'
#: Key name for recent messages.
key_recent_messages = 'recent_messages'
def __init__(self, redis=None):
self._redis = redis
@property
def r(self):
return self._redis
def recent_messages(self, start=0, stop=-1):
"""
Returns a list of recent messages from `start` to `stop`.
"""
if not self.r:
return []
return [
json.loads(m) for m in self.r.lrange(
self.key_recent_messages, start, stop
)
]
def send_message(self, message, channel):
"""
Sends `message` to `channel`.
"""
final_message = {
# What we're delivering.
'type': 'message',
# Contents of the message.
'payload': {
'msg': message.replace('\n', '').replace('\r', '')
},
# Destination.
'channel': {
'channel': channel.channel,
'host': channel.host,
'port': channel.port,
'ssl': channel.ssl
}
}
message_dump = json.dumps(final_message)
self.r.rpush(self.key_queue_messages, message_dump)
def log_message(self, message, project, log_cap=200):
"""
        Log up to `log_cap` recent messages.
"""
final_message = {
'msg': message,
'project_id': project.id,
'owner_id': project.owner.id
}
message_dump = json.dumps(final_message)
with self.r.pipeline() as pipe:
pipe.lpush(self.key_recent_messages, message_dump)
pipe.ltrim(self.key_recent_messages, 0, log_cap)
pipe.execute()
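def _example_send(redis_client, channel):
    """Minimal usage sketch (illustrative only, not called by Notifico).
    Queues a plain message for delivery to `channel` with the service above;
    `redis_client` is assumed to be a configured Redis connection and
    `channel` a channel model with channel/host/port/ssl attributes.
    """
    ms = MessageService(redis=redis_client)
    ms.send_message('Hello from Notifico!', channel)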
|
mit
| 2,819,069,612,042,817,000 | 2,368,480,064,774,569,500 | 27.176471 | 66 | 0.503653 | false |
brahle/eval2
|
scripts/haski/actions/reviewaction.py
|
1
|
1578
|
#!/usr/bin/env python3.2
# Copyright 2011 Bruno Rahle
#
# This file is part of Evaluator.
#
# Evaluator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Evaluator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Evaluator. If not, see
# <http://www.gnu.org/licenses/>.
from actions.baseaction import BaseHaskiAction
import argparse
class ReviewAction(BaseHaskiAction):
"""This class is the class that does linting work.
"""
RB_ID_STR = 'reviewboard id'
def __call__(self, params):
"""Fetches the desired revision and then sends it to reviewboard.
"""
commit = self.get_commit(params)
if not params.skip_lint:
commit.lint(params)
rb_id = commit.review()
if params.revision != 'HEAD':
if self.RB_ID_STR not in commit.message.fields:
print('[WARNING] Please edit the message to incorporate',
'`ReviewBoardID` field.')
else:
commit.message.set_field(self.RB_ID_STR, rb_id)
commit.amend()
def main():
pass
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,390,026,955,718,450,000 | 4,399,183,238,700,286,000 | 31.204082 | 73 | 0.665399 | false |
crosswalk-project/chromium-crosswalk-efl
|
tools/telemetry/telemetry/core/platform/profiler/android_profiling_helper_unittest.py
|
26
|
4767
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pickle
import re
import shutil
import tempfile
import unittest
from telemetry import benchmark
from telemetry.core import util
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.unittest import simple_mock
from telemetry.unittest import tab_test_case
def _GetLibrariesMappedIntoProcesses(device, pids):
libs = set()
for pid in pids:
maps_file = '/proc/%d/maps' % pid
maps = device.ReadFile(maps_file, as_root=True)
for map_line in maps:
lib = re.match(r'.*\s(/.*[.]so)$', map_line)
if lib:
libs.add(lib.group(1))
return libs
class TestAndroidProfilingHelper(unittest.TestCase):
def testGetRequiredLibrariesForPerfProfile(self):
perf_output = os.path.join(
util.GetUnittestDataDir(), 'sample_perf_report_output.txt')
with open(perf_output) as f:
perf_output = f.read()
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn([None, perf_output])
mock_subprocess = simple_mock.MockObject()
mock_subprocess.ExpectCall(
'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
real_subprocess = android_profiling_helper.subprocess
android_profiling_helper.subprocess = mock_subprocess
try:
libs = android_profiling_helper.GetRequiredLibrariesForPerfProfile('foo')
self.assertEqual(libs, set([
'/data/app-lib/com.google.android.apps.chrome-2/libchrome.2016.0.so',
'/system/lib/libart.so',
'/system/lib/libc.so',
'/system/lib/libm.so']))
finally:
android_profiling_helper.subprocess = real_subprocess
@benchmark.Enabled('android')
def testGetRequiredLibrariesForVTuneProfile(self):
vtune_db_output = os.path.join(
util.GetUnittestDataDir(), 'sample_vtune_db_output')
with open(vtune_db_output, 'rb') as f:
vtune_db_output = pickle.load(f)
mock_cursor = simple_mock.MockObject()
mock_cursor.ExpectCall(
'execute').WithArgs(simple_mock.DONT_CARE).WillReturn(vtune_db_output)
mock_conn = simple_mock.MockObject()
mock_conn.ExpectCall('cursor').WillReturn(mock_cursor)
mock_conn.ExpectCall('close')
mock_sqlite3 = simple_mock.MockObject()
mock_sqlite3.ExpectCall(
'connect').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_conn)
real_sqlite3 = android_profiling_helper.sqlite3
android_profiling_helper.sqlite3 = mock_sqlite3
try:
libs = android_profiling_helper.GetRequiredLibrariesForVTuneProfile('foo')
self.assertEqual(libs, set([
'/data/app-lib/com.google.android.apps.chrome-1/libchrome.2019.0.so',
'/system/lib/libdvm.so',
'/system/lib/libc.so',
'/system/lib/libm.so']))
finally:
android_profiling_helper.sqlite3 = real_sqlite3
class TestAndroidProfilingHelperTabTestCase(tab_test_case.TabTestCase):
def setUp(self):
super(TestAndroidProfilingHelperTabTestCase, self).setUp()
# pylint: disable=W0212
browser_backend = self._browser._browser_backend
self._device = browser_backend._adb.device()
@benchmark.Enabled('android')
def testCreateSymFs(self):
# pylint: disable=W0212
browser_pid = self._browser._browser_backend.pid
pids = ([browser_pid] +
self._browser._platform_backend.GetChildPids(browser_pid))
libs = _GetLibrariesMappedIntoProcesses(self._device, pids)
assert libs
symfs_dir = tempfile.mkdtemp()
try:
kallsyms = android_profiling_helper.CreateSymFs(self._device, symfs_dir,
libs)
# Check that we have kernel symbols.
assert os.path.exists(kallsyms)
is_unstripped = re.compile('^/data/app/.*\.so$')
has_unstripped = False
# Check that all requested libraries are present.
for lib in libs:
has_unstripped = has_unstripped or is_unstripped.match(lib)
assert os.path.exists(os.path.join(symfs_dir, lib[1:])), \
'%s not found in symfs' % lib
# Make sure we found at least one unstripped library.
assert has_unstripped
finally:
shutil.rmtree(symfs_dir)
@benchmark.Enabled('android')
def testGetToolchainBinaryPath(self):
with tempfile.NamedTemporaryFile() as libc:
self._device.PullFile('/system/lib/libc.so', libc.name)
path = android_profiling_helper.GetToolchainBinaryPath(libc.name,
'objdump')
assert os.path.exists(path)
|
bsd-3-clause
| -953,835,931,837,098,500 | 1,231,513,313,077,413,600 | 34.051471 | 80 | 0.679253 | false |
skidzo/sympy
|
sympy/simplify/tests/test_powsimp.py
|
9
|
11985
|
from sympy import (
    symbols, powsimp, MatrixSymbol, sqrt, pi, Mul, gamma, Function,
S, I, exp, simplify, sin, E, log, hyper, Symbol, Dummy, powdenest, root,
Rational)
from sympy.abc import x, y, z, t, a, b, c, d, e, f, g, h, i, k
def test_powsimp():
x, y, z, n = symbols('x,y,z,n')
f = Function('f')
assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
assert powsimp(
f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep=True ) == f(1)
assert exp(x)*exp(y) == exp(x)*exp(y)
assert powsimp(exp(x)*exp(y)) == exp(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == \
exp(x + y)*2**(x + y)
assert powsimp(exp(x)*exp(y)*exp(2)*sin(x) + sin(y) + 2**x*2**y) == \
exp(2 + x + y)*sin(x) + sin(y) + 2**(x + y)
assert powsimp(sin(exp(x)*exp(y))) == sin(exp(x)*exp(y))
assert powsimp(sin(exp(x)*exp(y)), deep=True) == sin(exp(x + y))
assert powsimp(x**2*x**y) == x**(2 + y)
# This should remain factored, because 'exp' with deep=True is supposed
# to act like old automatic exponent combining.
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp', deep=True) == \
(1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), deep=True) == \
(1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E)) == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp') == \
(1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='base') == \
(1 + E*exp(E))*exp(-E)
x, y = symbols('x,y', nonnegative=True)
n = Symbol('n', real=True)
assert powsimp(y**n * (y/x)**(-n)) == x**n
assert powsimp(x**(x**(x*y)*y**(x*y))*y**(x**(x*y)*y**(x*y)), deep=True) \
== (x*y)**(x*y)**(x*y)
assert powsimp(2**(2**(2*x)*x), deep=False) == 2**(2**(2*x)*x)
assert powsimp(2**(2**(2*x)*x), deep=True) == 2**(x*4**x)
assert powsimp(
exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == \
exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp(
exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == \
exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp((x + y)/(3*z), deep=False, combine='exp') == (x + y)/(3*z)
assert powsimp((x/3 + y/3)/z, deep=True, combine='exp') == (x/3 + y/3)/z
assert powsimp(exp(x)/(1 + exp(x)*exp(y)), deep=True) == \
exp(x)/(1 + exp(x + y))
assert powsimp(x*y**(z**x*z**y), deep=True) == x*y**(z**(x + y))
assert powsimp((z**x*z**y)**x, deep=True) == (z**(x + y))**x
assert powsimp(x*(z**x*z**y)**x, deep=True) == x*(z**(x + y))**x
p = symbols('p', positive=True)
assert powsimp((1/x)**log(2)/x) == (1/x)**(1 + log(2))
assert powsimp((1/p)**log(2)/p) == p**(-1 - log(2))
# coefficient of exponent can only be simplified for positive bases
assert powsimp(2**(2*x)) == 4**x
assert powsimp((-1)**(2*x)) == (-1)**(2*x)
i = symbols('i', integer=True)
assert powsimp((-1)**(2*i)) == 1
assert powsimp((-1)**(-x)) != (-1)**x # could be 1/((-1)**x), but is not
# force=True overrides assumptions
assert powsimp((-1)**(2*x), force=True) == 1
# rational exponents allow combining of negative terms
w, n, m = symbols('w n m', negative=True)
e = i/a # not a rational exponent if `a` is unknown
ex = w**e*n**e*m**e
assert powsimp(ex) == m**(i/a)*n**(i/a)*w**(i/a)
e = i/3
ex = w**e*n**e*m**e
assert powsimp(ex) == (-1)**i*(-m*n*w)**(i/3)
e = (3 + i)/i
ex = w**e*n**e*m**e
assert powsimp(ex) == (-1)**(3*e)*(-m*n*w)**e
eq = x**(2*a/3)
# eq != (x**a)**(2/3) (try x = -1 and a = 3 to see)
assert powsimp(eq).exp == eq.exp == 2*a/3
# powdenest goes the other direction
assert powsimp(2**(2*x)) == 4**x
assert powsimp(exp(p/2)) == exp(p/2)
# issue 6368
eq = Mul(*[sqrt(Dummy(imaginary=True)) for i in range(3)])
assert powsimp(eq) == eq and eq.is_Mul
assert all(powsimp(e) == e for e in (sqrt(x**a), sqrt(x**2)))
# issue 8836
assert str( powsimp(exp(I*pi/3)*root(-1,3)) ) == '(-1)**(2/3)'
def test_powsimp_negated_base():
assert powsimp((-x + y)/sqrt(x - y)) == -sqrt(x - y)
assert powsimp((-x + y)*(-z + y)/sqrt(x - y)/sqrt(z - y)) == sqrt(x - y)*sqrt(z - y)
p = symbols('p', positive=True)
assert powsimp((-p)**a/p**a) == (-1)**a
n = symbols('n', negative=True)
assert powsimp((-n)**a/n**a) == (-1)**a
# if x is 0 then the lhs is 0**a*oo**a which is not (-1)**a
assert powsimp((-x)**a/x**a) != (-1)**a
def test_powsimp_nc():
x, y, z = symbols('x,y,z')
A, B, C = symbols('A B C', commutative=False)
assert powsimp(A**x*A**y, combine='all') == A**(x + y)
assert powsimp(A**x*A**y, combine='base') == A**x*A**y
assert powsimp(A**x*A**y, combine='exp') == A**(x + y)
assert powsimp(A**x*B**x, combine='all') == A**x*B**x
assert powsimp(A**x*B**x, combine='base') == A**x*B**x
assert powsimp(A**x*B**x, combine='exp') == A**x*B**x
assert powsimp(B**x*A**x, combine='all') == B**x*A**x
assert powsimp(B**x*A**x, combine='base') == B**x*A**x
assert powsimp(B**x*A**x, combine='exp') == B**x*A**x
assert powsimp(A**x*A**y*A**z, combine='all') == A**(x + y + z)
assert powsimp(A**x*A**y*A**z, combine='base') == A**x*A**y*A**z
assert powsimp(A**x*A**y*A**z, combine='exp') == A**(x + y + z)
assert powsimp(A**x*B**x*C**x, combine='all') == A**x*B**x*C**x
assert powsimp(A**x*B**x*C**x, combine='base') == A**x*B**x*C**x
assert powsimp(A**x*B**x*C**x, combine='exp') == A**x*B**x*C**x
assert powsimp(B**x*A**x*C**x, combine='all') == B**x*A**x*C**x
assert powsimp(B**x*A**x*C**x, combine='base') == B**x*A**x*C**x
assert powsimp(B**x*A**x*C**x, combine='exp') == B**x*A**x*C**x
def test_issue_6440():
assert powsimp(16*2**a*8**b) == 2**(a + 3*b + 4)
def test_powdenest():
from sympy import powdenest
from sympy.abc import x, y, z, a, b
p, q = symbols('p q', positive=True)
i, j = symbols('i,j', integer=True)
assert powdenest(x) == x
assert powdenest(x + 2*(x**(2*a/3))**(3*x)) == (x + 2*(x**(2*a/3))**(3*x))
assert powdenest((exp(2*a/3))**(3*x)) # -X-> (exp(a/3))**(6*x)
assert powdenest((x**(2*a/3))**(3*x)) == ((x**(2*a/3))**(3*x))
assert powdenest(exp(3*x*log(2))) == 2**(3*x)
assert powdenest(sqrt(p**2)) == p
i, j = symbols('i,j', integer=True)
eq = p**(2*i)*q**(4*i)
assert powdenest(eq) == (p*q**2)**(2*i)
# -X-> (x**x)**i*(x**x)**j == x**(x*(i + j))
assert powdenest((x**x)**(i + j))
assert powdenest(exp(3*y*log(x))) == x**(3*y)
assert powdenest(exp(y*(log(a) + log(b)))) == (a*b)**y
assert powdenest(exp(3*(log(a) + log(b)))) == a**3*b**3
assert powdenest(((x**(2*i))**(3*y))**x) == ((x**(2*i))**(3*y))**x
assert powdenest(((x**(2*i))**(3*y))**x, force=True) == x**(6*i*x*y)
assert powdenest(((x**(2*a/3))**(3*y/i))**x) == \
(((x**(2*a/3))**(3*y/i))**x)
assert powdenest((x**(2*i)*y**(4*i))**z, force=True) == (x*y**2)**(2*i*z)
assert powdenest((p**(2*i)*q**(4*i))**j) == (p*q**2)**(2*i*j)
e = ((p**(2*a))**(3*y))**x
assert powdenest(e) == e
e = ((x**2*y**4)**a)**(x*y)
assert powdenest(e) == e
e = (((x**2*y**4)**a)**(x*y))**3
assert powdenest(e) == ((x**2*y**4)**a)**(3*x*y)
assert powdenest((((x**2*y**4)**a)**(x*y)), force=True) == \
(x*y**2)**(2*a*x*y)
assert powdenest((((x**2*y**4)**a)**(x*y))**3, force=True) == \
(x*y**2)**(6*a*x*y)
assert powdenest((x**2*y**6)**i) != (x*y**3)**(2*i)
x, y = symbols('x,y', positive=True)
assert powdenest((x**2*y**6)**i) == (x*y**3)**(2*i)
assert powdenest((x**(2*i/3)*y**(i/2))**(2*i)) == (x**(S(4)/3)*y)**(i**2)
assert powdenest(sqrt(x**(2*i)*y**(6*i))) == (x*y**3)**i
assert powdenest(4**x) == 2**(2*x)
assert powdenest((4**x)**y) == 2**(2*x*y)
assert powdenest(4**x*y) == 2**(2*x)*y
def test_powdenest_polar():
x, y, z = symbols('x y z', polar=True)
a, b, c = symbols('a b c')
assert powdenest((x*y*z)**a) == x**a*y**a*z**a
assert powdenest((x**a*y**b)**c) == x**(a*c)*y**(b*c)
assert powdenest(((x**a)**b*y**c)**c) == x**(a*b*c)*y**(c**2)
def test_issue_5805():
arg = ((gamma(x)*hyper((), (), x))*pi)**2
assert powdenest(arg) == (pi*gamma(x)*hyper((), (), x))**2
assert arg.is_positive is None
def test_issue_9324_powsimp_on_matrix_symbol():
M = MatrixSymbol('M', 10, 10)
expr = powsimp(M, deep=True)
assert expr == M
assert expr.args[0] == 'M'
def test_issue_6367():
z = -5*sqrt(2)/(2*sqrt(2*sqrt(29) + 29)) + sqrt(-sqrt(29)/29 + S(1)/2)
assert Mul(*[powsimp(a) for a in Mul.make_args(z.normal())]) == 0
assert powsimp(z.normal()) == 0
assert simplify(z) == 0
assert powsimp(sqrt(2 + sqrt(3))*sqrt(2 - sqrt(3)) + 1) == 2
assert powsimp(z) != 0
def test_powsimp_polar():
from sympy import polar_lift, exp_polar
x, y, z = symbols('x y z')
p, q, r = symbols('p q r', polar=True)
assert (polar_lift(-1))**(2*x) == exp_polar(2*pi*I*x)
assert powsimp(p**x * q**x) == (p*q)**x
assert p**x * (1/p)**x == 1
assert (1/p)**x == p**(-x)
assert exp_polar(x)*exp_polar(y) == exp_polar(x)*exp_polar(y)
assert powsimp(exp_polar(x)*exp_polar(y)) == exp_polar(x + y)
assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y) == \
(p*exp_polar(1))**(x + y)
assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y, combine='exp') == \
exp_polar(x + y)*p**(x + y)
assert powsimp(
exp_polar(x)*exp_polar(y)*exp_polar(2)*sin(x) + sin(y) + p**x*p**y) \
== p**(x + y) + sin(x)*exp_polar(2 + x + y) + sin(y)
assert powsimp(sin(exp_polar(x)*exp_polar(y))) == \
sin(exp_polar(x)*exp_polar(y))
assert powsimp(sin(exp_polar(x)*exp_polar(y)), deep=True) == \
sin(exp_polar(x + y))
def test_issue_5728():
b = x*sqrt(y)
a = sqrt(b)
c = sqrt(sqrt(x)*y)
assert powsimp(a*b) == sqrt(b)**3
assert powsimp(a*b**2*sqrt(y)) == sqrt(y)*a**5
assert powsimp(a*x**2*c**3*y) == c**3*a**5
assert powsimp(a*x*c**3*y**2) == c**7*a
assert powsimp(x*c**3*y**2) == c**7
assert powsimp(x*c**3*y) == x*y*c**3
assert powsimp(sqrt(x)*c**3*y) == c**5
assert powsimp(sqrt(x)*a**3*sqrt(y)) == sqrt(x)*sqrt(y)*a**3
assert powsimp(Mul(sqrt(x)*c**3*sqrt(y), y, evaluate=False)) == \
sqrt(x)*sqrt(y)**3*c**3
assert powsimp(a**2*a*x**2*y) == a**7
# symbolic powers work, too
b = x**y*y
a = b*sqrt(b)
assert a.is_Mul is True
assert powsimp(a) == sqrt(b)**3
# as does exp
a = x*exp(2*y/3)
assert powsimp(a*sqrt(a)) == sqrt(a)**3
assert powsimp(a**2*sqrt(a)) == sqrt(a)**5
assert powsimp(a**2*sqrt(sqrt(a))) == sqrt(sqrt(a))**9
def test_issue_from_PR1599():
n1, n2, n3, n4 = symbols('n1 n2 n3 n4', negative=True)
assert (powsimp(sqrt(n1)*sqrt(n2)*sqrt(n3)) ==
-I*sqrt(-n1)*sqrt(-n2)*sqrt(-n3))
assert (powsimp(root(n1, 3)*root(n2, 3)*root(n3, 3)*root(n4, 3)) ==
-(-1)**(S(1)/3)*
(-n1)**(S(1)/3)*(-n2)**(S(1)/3)*(-n3)**(S(1)/3)*(-n4)**(S(1)/3))
def test_issue_10195():
a = Symbol('a', integer=True)
l = Symbol('l', even=True, nonzero=True)
n = Symbol('n', odd=True)
e_x = (-1)**(n/2 - Rational(1, 2)) - (-1)**(3*n/2 - Rational(1, 2))
assert powsimp((-1)**(l/2)) == I**l
assert powsimp((-1)**(n/2)) == I**n
assert powsimp((-1)**(3*n/2)) == -I**n
assert powsimp(e_x) == (-1)**(n/2 - Rational(1, 2)) + (-1)**(3*n/2 +
Rational(1,2))
assert powsimp((-1)**(3*a/2)) == (-I)**a
def test_issue_11981():
x, y = symbols('x y', commutative=False)
assert powsimp((x*y)**2 * (y*x)**2) == (x*y)**2 * (y*x)**2
|
bsd-3-clause
| 1,393,987,790,236,032,300 | 5,747,668,236,843,609,000 | 38.817276 | 88 | 0.501627 | false |
pelodelfuego/word2vec-toolbox
|
toolbox/mlLib/conceptPairFeature.py
|
1
|
4358
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __init__
import numpy as np
from scipy.weave import inline
from sklearn.ensemble import RandomForestClassifier
import cpLib.concept as cp
import utils.skUtils as sku
# PROJECTION
def projCosSim(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float norm_v1 = 0.0;
float norm_v2 = 0.0;
float dot_pdt = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dot_pdt += v1[j] * v2[j];
norm_v1 += v1[j] * v1[j];
norm_v2 += v2[j] * v2[j];
}
}
norm_v1 = sqrtf(norm_v1);
norm_v2 = sqrtf(norm_v2);
arr[i] = dot_pdt / norm_v1 / norm_v2;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projEuclDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dist += pow(v1[j] - v2[j], 2);
}
}
arr[i] = sqrt(dist);
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projManaDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dist += fabs(v1[i] - v2[i]);
}
}
arr[i] = dist;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
# COMMUTATIVE FEATURE
def subCarth(conceptPair):
return conceptPair[2].vect - conceptPair[0].vect
def subPolar(conceptPair):
return conceptPair[2].polarVect() - conceptPair[0].polarVect()
def subAngular(conceptPair):
return conceptPair[2].angularVect() - conceptPair[0].angularVect()
def concatCarth(conceptPair):
return np.concatenate((conceptPair[0].vect, conceptPair[2].vect))
def concatPolar(conceptPair):
return np.concatenate((conceptPair[0].polarVect(), conceptPair[2].polarVect()))
def concatAngular(conceptPair):
return np.concatenate((conceptPair[0].angularVect(), conceptPair[2].angularVect()))
# NON COMMUATIVE FEATURE
# PROJECTION SIMILARITY
def pCosSim(conceptPair):
return projCosSim(conceptPair[0], conceptPair[2])
def pEuclDist(conceptPair):
return projEuclDist(conceptPair[0], conceptPair[2])
def pManaDist(conceptPair):
return projManaDist(conceptPair[0], conceptPair[2])
# PROJECTION DISSIMILARITY
def _projectionDissimarilty(projectionMetric, globalMetric, conceptPair):
projectedFeature = projectionMetric(conceptPair[0], conceptPair[2])
globalFeature = globalMetric(conceptPair[0], conceptPair[2])
return np.array([(globalFeature - v) for v in projectedFeature])
def pdCosSim(conceptPair):
return _projectionDissimarilty(projCosSim, cp.cosSim, conceptPair)
def pdEuclDist(conceptPair):
return _projectionDissimarilty(projEuclDist, cp.euclDist, conceptPair)
def pdManaDist(conceptPair):
return _projectionDissimarilty(projManaDist, cp.manaDist, conceptPair)
# CLF
class ConceptPairClf(object):
def __init__(self, clf, featureExtractionFct):
self.clf = clf
self.featureExtractionFct = featureExtractionFct
def fit(self, X, y):
self.clf.fit([self.featureExtractionFct(x) for x in X], y)
self.classes_ = self.clf.classes_
def predict(self, X):
return self.clf.predict([self.featureExtractionFct(x) for x in X])
def predict_proba(self, X):
return self.clf.predict_proba([self.featureExtractionFct(x) for x in X])
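def _example_classifier():
    """Minimal usage sketch (an assumption, not part of the toolbox API).
    Wraps a random forest around the commutative subCarth feature; training
    samples are expected to be (concept, relation, concept) triples.
    """
    return ConceptPairClf(RandomForestClassifier(n_estimators=100), subCarth)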
|
gpl-3.0
| 1,538,765,610,746,357,500 | 5,395,825,501,198,471,000 | 26.2375 | 91 | 0.562184 | false |
skodapetr/lbvs-environment
|
scripts/libs/core.py
|
1
|
1664
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import csv
import os
import logging
import gzip
__license__ = "X11"
def init_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] - %(message)s',
datefmt='%H:%M:%S')
def create_directory(path):
if not os.path.exists(path) and not path == "":
os.makedirs(path)
def create_parent_directory(path):
parent_directory = os.path.dirname(path)
if not os.path.exists(parent_directory) and not parent_directory == "":
os.makedirs(parent_directory)
def read_json(path):
if path.endswith(".gz"):
with gzip.open(path, "rt") as stream:
return json.load(stream)
else:
with open(path, "r") as stream:
return json.load(stream)
def write_json(path, object_to_write):
create_parent_directory(path)
if path.endswith(".gz"):
with gzip.open(path, "wt") as stream:
json.dump(object_to_write, stream, indent=2)
else:
with open(path, "w") as stream:
json.dump(object_to_write, stream, indent=2)
def read_csv_as_object(path):
"""
Read CSV lines as objects.
"""
results = []
with open(path) as stream:
reader = csv.reader(stream, delimiter=",", quotechar='"')
header = next(reader)
for row in reader:
new_object = {}
for index in range(0, len(row)):
new_object[header[index]] = row[index]
results.append(new_object)
return results
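def _example_roundtrip(path):
    """Minimal usage sketch (illustrative only).
    Writes an object to a (possibly gzipped) JSON file and reads it back
    using the helpers above; `path` may end with '.gz'.
    """
    write_json(path, {"name": "demo", "values": [1, 2, 3]})
    return read_json(path)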
if __name__ == "__main__":
raise Exception("This module can be used only as a library!")
|
mit
| -7,574,750,422,789,972,000 | 9,169,770,703,010,959,000 | 23.470588 | 75 | 0.590144 | false |
willbarton/regulations-site
|
regulations/generator/layers/interpretations.py
|
7
|
2479
|
from django.http import HttpRequest
# Don't import PartialInterpView or utils directly; causes an import cycle
from regulations import generator, views
from regulations.generator.node_types import label_to_text
from regulations.generator.section_url import SectionUrl
class InterpretationsLayer(object):
"""Fetches the (rendered) interpretation for this node, if available"""
shorthand = 'interp'
def __init__(self, layer, version=None):
self.layer = layer
self.version = version
self.section_url = SectionUrl()
self.root_interp_label = None
self.partial_view = None
def preprocess_root(self, root):
"""The root label will allow us to use a single set of layer
appliers and grab all interp data at once."""
self.root_interp_label = '-'.join(root['label'] + ['Interp'])
view_class = views.partial_interp.PartialInterpView
self.partial_view = view_class.as_view(
inline=True, appliers=view_class.mk_appliers(
self.root_interp_label, self.version))
def apply_layer(self, text_index):
"""Return a pair of field-name + interpretation if one applies."""
if text_index in self.layer and self.layer[text_index]:
context = {'interps': [],
'for_markup_id': text_index,
'for_label': label_to_text(text_index.split('-'),
include_section=False)}
# Force caching of a few nodes up -- should prevent a request
# per interpretation if caching is on
generator.generator.get_tree_paragraph(
self.root_interp_label, self.version)
for layer_element in self.layer[text_index]:
reference = layer_element['reference']
request = HttpRequest()
request.method = 'GET'
response = self.partial_view(request, label_id=reference,
version=self.version)
response.render()
interp = {
'label_id': reference,
'markup': response.content,
}
ref_parts = reference.split('-')
interp['section_id'] = self.section_url.interp(
ref_parts, self.version)
context['interps'].append(interp)
return 'interp', context
|
cc0-1.0
| -5,144,989,199,328,182,000 | -7,329,536,068,800,499,000 | 40.316667 | 76 | 0.572408 | false |
kaniblu/hangul-utils
|
hangul_utils/unicode.py
|
1
|
8775
|
__all__ = ["split_syllable_char", "split_syllables",
"join_jamos", "join_jamos_char",
"CHAR_INITIALS", "CHAR_MEDIALS", "CHAR_FINALS"]
import itertools
INITIAL = 0x001
MEDIAL = 0x010
FINAL = 0x100
CHAR_LISTS = {
INITIAL: list(map(chr, [
0x3131, 0x3132, 0x3134, 0x3137, 0x3138, 0x3139,
0x3141, 0x3142, 0x3143, 0x3145, 0x3146, 0x3147,
0x3148, 0x3149, 0x314a, 0x314b, 0x314c, 0x314d,
0x314e
])),
MEDIAL: list(map(chr, [
0x314f, 0x3150, 0x3151, 0x3152, 0x3153, 0x3154,
0x3155, 0x3156, 0x3157, 0x3158, 0x3159, 0x315a,
0x315b, 0x315c, 0x315d, 0x315e, 0x315f, 0x3160,
0x3161, 0x3162, 0x3163
])),
FINAL: list(map(chr, [
0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136,
0x3137, 0x3139, 0x313a, 0x313b, 0x313c, 0x313d,
0x313e, 0x313f, 0x3140, 0x3141, 0x3142, 0x3144,
0x3145, 0x3146, 0x3147, 0x3148, 0x314a, 0x314b,
0x314c, 0x314d, 0x314e
]))
}
CHAR_INITIALS = CHAR_LISTS[INITIAL]
CHAR_MEDIALS = CHAR_LISTS[MEDIAL]
CHAR_FINALS = CHAR_LISTS[FINAL]
CHAR_SETS = {k: set(v) for k, v in CHAR_LISTS.items()}
CHARSET = set(itertools.chain(*CHAR_SETS.values()))
CHAR_INDICES = {k: {c: i for i, c in enumerate(v)}
for k, v in CHAR_LISTS.items()}
def is_hangul_syllable(c):
return 0xac00 <= ord(c) <= 0xd7a3 # Hangul Syllables
def is_hangul_jamo(c):
return 0x1100 <= ord(c) <= 0x11ff # Hangul Jamo
def is_hangul_compat_jamo(c):
return 0x3130 <= ord(c) <= 0x318f # Hangul Compatibility Jamo
def is_hangul_jamo_exta(c):
return 0xa960 <= ord(c) <= 0xa97f # Hangul Jamo Extended-A
def is_hangul_jamo_extb(c):
return 0xd7b0 <= ord(c) <= 0xd7ff # Hangul Jamo Extended-B
def is_hangul(c):
return (is_hangul_syllable(c) or
is_hangul_jamo(c) or
is_hangul_compat_jamo(c) or
is_hangul_jamo_exta(c) or
is_hangul_jamo_extb(c))
def is_supported_hangul(c):
return is_hangul_syllable(c) or is_hangul_compat_jamo(c)
def check_hangul(c, jamo_only=False):
if not ((jamo_only or is_hangul_compat_jamo(c)) or is_supported_hangul(c)):
raise ValueError(f"'{c}' is not a supported hangul character. "
f"'Hangul Syllables' (0xac00 ~ 0xd7a3) and "
f"'Hangul Compatibility Jamos' (0x3130 ~ 0x318f) are "
f"supported at the moment.")
def get_jamo_type(c):
check_hangul(c)
assert is_hangul_compat_jamo(c), f"not a jamo: {ord(c):x}"
return sum(t for t, s in CHAR_SETS.items() if c in s)
def split_syllable_char(c):
"""
    Splits a given Korean syllable into its components. Each component is
represented by Unicode in 'Hangul Compatibility Jamo' range.
Arguments:
c: A Korean character.
Returns:
A triple (initial, medial, final) of Hangul Compatibility Jamos.
If no jamo corresponds to a position, `None` is returned there.
Example:
>>> split_syllable_char("안")
("ㅇ", "ㅏ", "ㄴ")
>>> split_syllable_char("고")
("ㄱ", "ㅗ", None)
>>> split_syllable_char("ㅗ")
(None, "ㅗ", None)
>>> split_syllable_char("ㅇ")
("ㅇ", None, None)
"""
check_hangul(c)
if len(c) != 1:
raise ValueError("Input string must have exactly one character.")
init, med, final = None, None, None
if is_hangul_syllable(c):
offset = ord(c) - 0xac00
x = (offset - offset % 28) // 28
init, med, final = x // 21, x % 21, offset % 28
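        # Worked example (standard Unicode Hangul layout): for c = "안" (U+C548),
        # offset = 0xc548 - 0xac00 = 6472, so final = 6472 % 28 = 4,
        # x = (6472 - 4) // 28 = 231, init = 231 // 21 = 11 (ㅇ) and
        # med = 231 % 21 = 0 (ㅏ); final 4 becomes ㄴ after the -1 shift below.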
if not final:
final = None
else:
final -= 1
else:
pos = get_jamo_type(c)
if pos & INITIAL == INITIAL:
pos = INITIAL
elif pos & MEDIAL == MEDIAL:
pos = MEDIAL
elif pos & FINAL == FINAL:
pos = FINAL
idx = CHAR_INDICES[pos][c]
if pos == INITIAL:
init = idx
elif pos == MEDIAL:
med = idx
else:
final = idx
return tuple(CHAR_LISTS[pos][idx] if idx is not None else None
for pos, idx in
zip([INITIAL, MEDIAL, FINAL], [init, med, final]))
def split_syllables(s, ignore_err=True, pad=None):
"""
Performs syllable-split on a string.
Arguments:
s (str): A string (possibly mixed with non-Hangul characters).
ignore_err (bool): If set False, it ensures that all characters in
the string are Hangul-splittable and throws a ValueError otherwise.
(default: True)
pad (str): Pad empty jamo positions (initial, medial, or final) with
`pad` character. This is useful for cases where fixed-length
strings are needed. (default: None)
Returns:
Hangul-split string
Example:
>>> split_syllables("안녕하세요")
"ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ"
>>> split_syllables("안녕하세요~~", ignore_err=False)
ValueError: encountered an unsupported character: ~ (0x7e)
>>> split_syllables("안녕하세요ㅛ", pad="x")
'ㅇㅏㄴㄴㅕㅇㅎㅏxㅅㅔxㅇㅛxxㅛx'
"""
def try_split(c):
try:
return split_syllable_char(c)
except ValueError:
if ignore_err:
return (c,)
raise ValueError(f"encountered an unsupported character: "
f"{c} (0x{ord(c):x})")
s = map(try_split, s)
if pad is not None:
tuples = map(lambda x: tuple(pad if y is None else y for y in x), s)
else:
tuples = map(lambda x: filter(None, x), s)
return "".join(itertools.chain(*tuples))
def join_jamos_char(init, med, final=None):
"""
Combines jamos into a single syllable.
Arguments:
        init (str): Initial jamo.
med (str): Medial jamo.
        final (str): Final jamo. If not supplied, the syllable is composed
            without a final consonant. (default: None)
Returns:
A Korean syllable.
"""
chars = (init, med, final)
for c in filter(None, chars):
check_hangul(c, jamo_only=True)
idx = tuple(CHAR_INDICES[pos][c] if c is not None else c
for pos, c in zip((INITIAL, MEDIAL, FINAL), chars))
init_idx, med_idx, final_idx = idx
# final index must be shifted once as
# final index with 0 points to syllables without final
final_idx = 0 if final_idx is None else final_idx + 1
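    # Illustrative check (standard Unicode Hangul layout): join_jamos_char("ㅇ", "ㅏ", "ㄴ")
    # yields init_idx=11, med_idx=0, final_idx=3+1=4, and
    # 0xac00 + 28*21*11 + 28*0 + 4 = 0xc548, i.e. "안".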
return chr(0xac00 + 28 * 21 * init_idx + 28 * med_idx + final_idx)
def join_jamos(s, ignore_err=True):
"""
Combines a sequence of jamos to produce a sequence of syllables.
Arguments:
        s (str): A string (possibly mixed with non-jamo characters).
ignore_err (bool): If set False, it will ensure that all characters
will be consumed for the making of syllables. It will throw a
ValueError when it fails to do so. (default: True)
Returns:
A string
Example:
>>> join_jamos("ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안녕하세요"
>>> join_jamos("ㅇㅏㄴㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안ㄴ녕하세요"
"""
last_t = 0
queue = []
new_string = ""
def flush(n=0):
new_queue = []
while len(queue) > n:
new_queue.append(queue.pop())
if len(new_queue) == 1:
if not ignore_err:
raise ValueError(f"invalid jamo character: {new_queue[0]}")
result = new_queue[0]
elif len(new_queue) >= 2:
try:
result = join_jamos_char(*new_queue)
except (ValueError, KeyError):
# Invalid jamo combination
if not ignore_err:
raise ValueError(f"invalid jamo characters: {new_queue}")
result = "".join(new_queue)
else:
result = None
return result
for c in s:
if c not in CHARSET:
if queue:
new_c = flush() + c
else:
new_c = c
last_t = 0
else:
t = get_jamo_type(c)
new_c = None
if t & FINAL == FINAL:
if not (last_t == MEDIAL):
new_c = flush()
elif t == INITIAL:
new_c = flush()
elif t == MEDIAL:
if last_t & INITIAL == INITIAL:
new_c = flush(1)
else:
new_c = flush()
last_t = t
queue.insert(0, c)
if new_c:
new_string += new_c
if queue:
new_string += flush()
return new_string
|
gpl-3.0
| 8,579,874,677,290,545,000 | -1,119,776,010,287,839,500 | 29.820789 | 79 | 0.551227 | false |
maftieu/CouchPotatoServer
|
libs/git/__init__.py
|
121
|
1673
|
# Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from repository import RemoteRepository
from repository import LocalRepository
from repository import clone
from repository import find_repository
|
gpl-3.0
| -7,212,389,253,509,985,000 | 6,849,842,331,946,817,000 | 58.75 | 79 | 0.777645 | false |
h3llrais3r/Auto-Subliminal
|
lib/ws4py/websocket.py
|
4
|
17891
|
# -*- coding: utf-8 -*-
import logging
import socket
import ssl
import time
import threading
import types
import errno
try:
from OpenSSL.SSL import Error as pyOpenSSLError
except ImportError:
class pyOpenSSLError(Exception):
pass
from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError, StreamClosed
from ws4py.streaming import Stream
from ws4py.messaging import Message, PingControlMessage,\
PongControlMessage
from ws4py.compat import basestring, unicode
DEFAULT_READING_SIZE = 2
logger = logging.getLogger('ws4py')
__all__ = ['WebSocket', 'EchoWebSocket', 'Heartbeat']
class Heartbeat(threading.Thread):
def __init__(self, websocket, frequency=2.0):
"""
Runs at a periodic interval specified by
        `frequency` by sending an unsolicited pong
message to the connected peer.
If the message fails to be sent and a socket
error is raised, we close the websocket
socket automatically, triggering the `closed`
handler.
"""
threading.Thread.__init__(self)
self.websocket = websocket
self.frequency = frequency
def __enter__(self):
if self.frequency:
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.stop()
def stop(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.frequency)
if self.websocket.terminated:
break
try:
self.websocket.send(PongControlMessage(data='beep'))
except socket.error:
logger.info("Heartbeat failed")
self.websocket.server_terminated = True
self.websocket.close_connection()
break
class WebSocket(object):
""" Represents a websocket endpoint and provides a high level interface to drive the endpoint. """
def __init__(self, sock, protocols=None, extensions=None, environ=None, heartbeat_freq=None):
""" The ``sock`` is an opened connection
resulting from the websocket handshake.
If ``protocols`` is provided, it is a list of protocols
negotiated during the handshake as is ``extensions``.
If ``environ`` is provided, it is a copy of the WSGI environ
        dictionary from the underlying WSGI server.
"""
self.stream = Stream(always_mask=False)
"""
Underlying websocket stream that performs the websocket
parsing to high level objects. By default this stream
never masks its messages. Clients using this class should
        set the ``stream.always_mask`` field to ``True``
        and the ``stream.expect_masking`` field to ``False``.
"""
self.protocols = protocols
"""
List of protocols supported by this endpoint.
Unused for now.
"""
self.extensions = extensions
"""
List of extensions supported by this endpoint.
Unused for now.
"""
self.sock = sock
"""
Underlying connection.
"""
self._is_secure = hasattr(sock, '_ssl') or hasattr(sock, '_sslobj')
"""
Tell us if the socket is secure or not.
"""
self.client_terminated = False
"""
Indicates if the client has been marked as terminated.
"""
self.server_terminated = False
"""
Indicates if the server has been marked as terminated.
"""
self.reading_buffer_size = DEFAULT_READING_SIZE
"""
Current connection reading buffer size.
"""
self.environ = environ
"""
WSGI environ dictionary.
"""
self.heartbeat_freq = heartbeat_freq
"""
At which interval the heartbeat will be running.
Set this to `0` or `None` to disable it entirely.
"""
"Internal buffer to get around SSL problems"
self.buf = b''
self._local_address = None
self._peer_address = None
@property
def local_address(self):
"""
Local endpoint address as a tuple
"""
if not self._local_address:
self._local_address = self.sock.getsockname()
if len(self._local_address) == 4:
self._local_address = self._local_address[:2]
return self._local_address
@property
def peer_address(self):
"""
Peer endpoint address as a tuple
"""
if not self._peer_address:
self._peer_address = self.sock.getpeername()
if len(self._peer_address) == 4:
self._peer_address = self._peer_address[:2]
return self._peer_address
def opened(self):
"""
Called by the server when the upgrade handshake
has succeeded.
"""
pass
def close(self, code=1000, reason=''):
"""
Call this method to initiate the websocket connection
closing by sending a close frame to the connected peer.
The ``code`` is the status code representing the
termination's reason.
Once this method is called, the ``server_terminated``
attribute is set. Calling this method several times is
safe as the closing frame will be sent only the first
time.
.. seealso:: Defined Status Codes http://tools.ietf.org/html/rfc6455#section-7.4.1
"""
if not self.server_terminated:
self.server_terminated = True
try:
self._write(self.stream.close(code=code, reason=reason).single(mask=self.stream.always_mask))
except Exception as ex:
logger.error("Error when terminating the connection: %s", str(ex))
def closed(self, code, reason=None):
"""
Called when the websocket stream and connection are finally closed.
The provided ``code`` is status set by the other point and
``reason`` is a human readable message.
.. seealso:: Defined Status Codes http://tools.ietf.org/html/rfc6455#section-7.4.1
"""
pass
@property
def terminated(self):
"""
Returns ``True`` if both the client and server have been
marked as terminated.
"""
return self.client_terminated is True and self.server_terminated is True
@property
def connection(self):
return self.sock
def close_connection(self):
"""
Shutdowns then closes the underlying connection.
"""
if self.sock:
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except:
pass
finally:
self.sock = None
def ping(self, message):
"""
Send a ping message to the remote peer.
The given `message` must be a unicode string.
"""
self.send(PingControlMessage(message))
def ponged(self, pong):
"""
Pong message, as a :class:`messaging.PongControlMessage` instance,
received on the stream.
"""
pass
def received_message(self, message):
"""
Called whenever a complete ``message``, binary or text,
is received and ready for application's processing.
The passed message is an instance of :class:`messaging.TextMessage`
or :class:`messaging.BinaryMessage`.
.. note:: You should override this method in your subclass.
"""
pass
def unhandled_error(self, error):
"""
Called whenever a socket, or an OS, error is trapped
by ws4py but not managed by it. The given error is
an instance of `socket.error` or `OSError`.
Note however that application exceptions will not go
through this handler. Instead, do make sure you
protect your code appropriately in `received_message`
or `send`.
The default behaviour of this handler is to log
the error with a message.
"""
logger.exception("Failed to receive data")
def _write(self, b):
"""
Trying to prevent a write operation
on an already closed websocket stream.
This cannot be bullet proof but hopefully
will catch almost all use cases.
"""
if self.terminated or self.sock is None:
raise RuntimeError("Cannot send on a terminated websocket")
self.sock.sendall(b)
def send(self, payload, binary=False):
"""
Sends the given ``payload`` out.
If ``payload`` is some bytes or a bytearray,
then it is sent as a single message not fragmented.
If ``payload`` is a generator, each chunk is sent as part of
        a fragmented message.
If ``binary`` is set, handles the payload as a binary message.
"""
message_sender = self.stream.binary_message if binary else self.stream.text_message
if isinstance(payload, basestring) or isinstance(payload, bytearray):
m = message_sender(payload).single(mask=self.stream.always_mask)
self._write(m)
elif isinstance(payload, Message):
data = payload.single(mask=self.stream.always_mask)
self._write(data)
elif type(payload) == types.GeneratorType:
bytes = next(payload)
first = True
for chunk in payload:
self._write(message_sender(bytes).fragment(first=first, mask=self.stream.always_mask))
bytes = chunk
first = False
self._write(message_sender(bytes).fragment(first=first, last=True, mask=self.stream.always_mask))
else:
raise ValueError("Unsupported type '%s' passed to send()" % type(payload))
def _get_from_pending(self):
"""
The SSL socket object provides the same interface
as the socket interface but behaves differently.
        When data is sent over an SSL connection,
        more data may be read than was requested by
the ws4py websocket object.
In that case, the data may have been indeed read
from the underlying real socket, but not read by the
application which will expect another trigger from the
manager's polling mechanism as if more data was still on the
wire. This will happen only when new data is
sent by the other peer which means there will be
some delay before the initial read data is handled
by the application.
Due to this, we have to rely on a non-public method
to query the internal SSL socket buffer if it has indeed
more data pending in its buffer.
Now, some people in the Python community
`discourage <https://bugs.python.org/issue21430>`_
this usage of the ``pending()`` method because it's not
the right way of dealing with such use case. They advise
`this approach <https://docs.python.org/dev/library/ssl.html#notes-on-non-blocking-sockets>`_
instead. Unfortunately, this applies only if the
application can directly control the poller which is not
the case with the WebSocket abstraction here.
        We therefore rely on this `technique <http://stackoverflow.com/questions/3187565/select-and-ssl-in-python>`_
which seems to be valid anyway.
This is a bit of a shame because we have to process
        more data than we wanted initially.
"""
data = b""
pending = self.sock.pending()
while pending:
data += self.sock.recv(pending)
pending = self.sock.pending()
return data
def once(self):
"""
Performs the operation of reading from the underlying
connection in order to feed the stream of bytes.
Because this needs to support SSL sockets, we must always
read as much as might be in the socket at any given time,
        however process expects to be called with only a certain
number of bytes at a time. That number is found in
self.reading_buffer_size, so we read everything into our own buffer,
and then from there feed self.process.
Then the stream indicates
whatever size must be read from the connection since
it knows the frame payload length.
It returns `False` if an error occurred at the
socket level or during the bytes processing. Otherwise,
it returns `True`.
"""
if self.terminated:
logger.debug("WebSocket is already terminated")
return False
try:
b = b''
if self._is_secure:
b = self._get_from_pending()
if not b and not self.buf:
b = self.sock.recv(self.reading_buffer_size)
if not b and not self.buf:
return False
self.buf += b
except (socket.error, OSError, pyOpenSSLError) as e:
if hasattr(e, "errno") and e.errno == errno.EINTR:
pass
else:
self.unhandled_error(e)
return False
else:
# process as much as we can
# the process will stop either if there is no buffer left
# or if the stream is closed
# only pass the requested number of bytes, leave the rest in the buffer
requested = self.reading_buffer_size
if not self.process(self.buf[:requested]):
return False
self.buf = self.buf[requested:]
return True
def terminate(self):
"""
Completes the websocket by calling the `closed`
method either using the received closing code
and reason, or when none was received, using
the special `1006` code.
Finally close the underlying connection for
good and cleanup resources by unsetting
the `environ` and `stream` attributes.
"""
s = self.stream
try:
if s.closing is None:
self.closed(1006, "Going away")
else:
self.closed(s.closing.code, s.closing.reason)
finally:
self.client_terminated = self.server_terminated = True
self.close_connection()
# Cleaning up resources
s._cleanup()
self.stream = None
self.environ = None
def process(self, bytes):
""" Takes some bytes and process them through the
internal stream's parser. If a message of any kind is
found, performs one of these actions:
* A closing message will initiate the closing handshake
* Errors will initiate a closing handshake
* A message will be passed to the ``received_message`` method
* Pings will see pongs be sent automatically
* Pongs will be passed to the ``ponged`` method
The process should be terminated when this method
returns ``False``.
"""
s = self.stream
if not bytes and self.reading_buffer_size > 0:
return False
self.reading_buffer_size = s.parser.send(bytes) or DEFAULT_READING_SIZE
if s.closing is not None:
logger.debug("Closing message received (%d) '%s'" % (s.closing.code, s.closing.reason))
if not self.server_terminated:
self.close(s.closing.code, s.closing.reason)
else:
self.client_terminated = True
return False
if s.errors:
for error in s.errors:
logger.debug("Error message received (%d) '%s'" % (error.code, error.reason))
self.close(error.code, error.reason)
s.errors = []
return False
if s.has_message:
self.received_message(s.message)
if s.message is not None:
s.message.data = None
s.message = None
return True
if s.pings:
for ping in s.pings:
self._write(s.pong(ping.data))
s.pings = []
if s.pongs:
for pong in s.pongs:
self.ponged(pong)
s.pongs = []
return True
def run(self):
"""
Performs the operation of reading from the underlying
connection in order to feed the stream of bytes.
We start with a small size of two bytes to be read
from the connection so that we can quickly parse an
incoming frame header. Then the stream indicates
whatever size must be read from the connection since
it knows the frame payload length.
        Note that we perform some automatic operations:
* On a closing message, we respond with a closing
message and finally close the connection
* We respond to pings with pong messages.
* Whenever an error is raised by the stream parsing,
we initiate the closing of the connection with the
        appropriate error code.
This method is blocking and should likely be run
in a thread.
"""
self.sock.setblocking(True)
with Heartbeat(self, frequency=self.heartbeat_freq):
s = self.stream
try:
self.opened()
while not self.terminated:
if not self.once():
break
finally:
self.terminate()
class EchoWebSocket(WebSocket):
def received_message(self, message):
"""
Automatically sends back the provided ``message`` to
its originating endpoint.
"""
self.send(message.data, message.is_binary)
|
gpl-3.0
| 1,342,445,205,321,523,000 | 8,473,947,613,599,662,000 | 32.19295 | 114 | 0.593874 | false |
YACOWS/opps
|
tests/core/permissions/test_models.py
|
4
|
2200
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from opps.channels.models import Channel
from opps.core.permissions.models import Permission, PermissionGroup
User = get_user_model()
class PermissionModelTest(TestCase):
def test_create(self):
user = User.objects.create(username='user')
instance = Permission.objects.create(user=user)
self.assertTrue(instance)
def test_empty_get_by_user(self):
user = User.objects.create(username='another')
result = Permission.get_by_user(user)
self.assertEqual(len(result['sites_id']), 0)
self.assertEqual(len(result['all_sites_id']), 0)
self.assertEqual(len(result['channels_id']), 0)
self.assertEqual(len(result['channels_sites_id']), 0)
def test_get_by_user_with_user_permission(self):
user = User.objects.create(username='john_doe')
site = Site.objects.all()[0]
channel = Channel.objects.create(
name='Home',
slug='home',
site=site,
user=user
)
permission = Permission.objects.create(user=user)
permission.channel.add(channel)
permission.save()
result = Permission.get_by_user(user)
self.assertTrue(site.pk in result['all_sites_id'])
self.assertTrue(channel.pk in result['channels_id'])
def test_get_by_user_with_group_permission(self):
group = Group.objects.create(name='programmers')
user = User.objects.create(username='john_doe')
user.groups.add(group)
site = Site.objects.all()[0]
channel = Channel.objects.create(
name='Home',
slug='home',
site=site,
user=user
)
permission = PermissionGroup.objects.create(group=group)
permission.channel.add(channel)
permission.save()
result = Permission.get_by_user(user)
self.assertTrue(site.pk in result['all_sites_id'])
self.assertTrue(channel.pk in result['channels_id'])
|
mit
| -3,891,205,289,608,450,600 | -6,325,543,070,881,678,000 | 31.352941 | 68 | 0.637273 | false |
Distrotech/bzr
|
bzrlib/tests/test_cache_utf8.py
|
2
|
4352
|
# Copyright (C) 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for utf8 caching."""
from bzrlib import (
cache_utf8,
)
from bzrlib.tests import TestCase
class TestEncodeCache(TestCase):
def setUp(self):
super(TestEncodeCache, self).setUp()
cache_utf8.clear_encoding_cache()
self.addCleanup(cache_utf8.clear_encoding_cache)
def check_encode(self, rev_id):
rev_id_utf8 = rev_id.encode('utf-8')
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
# After a single encode, the mapping should exist for
# both directions
self.assertEqual(rev_id_utf8, cache_utf8.encode(rev_id))
self.assertTrue(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertTrue(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
self.assertEqual(rev_id, cache_utf8.decode(rev_id_utf8))
cache_utf8.clear_encoding_cache()
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
def check_decode(self, rev_id):
rev_id_utf8 = rev_id.encode('utf-8')
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
# After a single decode, the mapping should exist for
# both directions
self.assertEqual(rev_id, cache_utf8.decode(rev_id_utf8))
self.assertTrue(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertTrue(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
self.assertEqual(rev_id_utf8, cache_utf8.encode(rev_id))
cache_utf8.clear_encoding_cache()
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
def test_ascii(self):
self.check_decode(u'all_ascii_characters123123123')
self.check_encode(u'all_ascii_characters123123123')
def test_unicode(self):
self.check_encode(u'some_\xb5_unicode_\xe5_chars')
self.check_decode(u'some_\xb5_unicode_\xe5_chars')
def test_cached_unicode(self):
x = u'\xb5yy' + u'\xe5zz'
y = u'\xb5yy' + u'\xe5zz'
self.assertFalse(x is y)
xp = cache_utf8.get_cached_unicode(x)
yp = cache_utf8.get_cached_unicode(y)
self.assertIs(xp, x)
self.assertIs(xp, yp)
def test_cached_utf8(self):
x = u'\xb5yy\xe5zz'.encode('utf8')
y = u'\xb5yy\xe5zz'.encode('utf8')
self.assertFalse(x is y)
xp = cache_utf8.get_cached_utf8(x)
yp = cache_utf8.get_cached_utf8(y)
self.assertIs(xp, x)
self.assertIs(xp, yp)
def test_cached_ascii(self):
x = '%s %s' % ('simple', 'text')
y = '%s %s' % ('simple', 'text')
self.assertFalse(x is y)
xp = cache_utf8.get_cached_ascii(x)
yp = cache_utf8.get_cached_ascii(y)
self.assertIs(xp, x)
self.assertIs(xp, yp)
# after caching, encode and decode should also return the right
# objects.
uni_x = cache_utf8.decode(x)
self.assertEqual(u'simple text', uni_x)
self.assertIsInstance(uni_x, unicode)
utf8_x = cache_utf8.encode(uni_x)
self.assertIs(utf8_x, x)
def test_decode_with_None(self):
self.assertEqual(None, cache_utf8._utf8_decode_with_None(None))
self.assertEqual(u'foo', cache_utf8._utf8_decode_with_None('foo'))
self.assertEqual(u'f\xb5',
cache_utf8._utf8_decode_with_None('f\xc2\xb5'))
|
gpl-2.0
| -2,162,313,173,147,394,800 | -950,536,064,740,259,800 | 36.196581 | 78 | 0.649127 | false |
eirslett/microservices-infrastructure
|
docs/conf.py
|
25
|
9388
|
# -*- coding: utf-8 -*-
#
# Microservices Infrastructure documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 4 06:59:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Microservices Infrastructure'
copyright = u'2015, Cisco Systems, Incorporated'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import alabaster
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
extensions += ['alabaster']
html_theme_options = {
'github_user': 'ciscocloud',
'github_repo': 'microservices-infrastructure',
'logo': 'cisco.png',
'logo_name': True,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroservicesInfrastructuredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MicroservicesInfrastructure.tex', u'Microservices Infrastructure Documentation',
u'Cisco Systems, Incorporated', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'microservicesinfrastructure', u'Microservices Infrastructure Documentation',
[u'Cisco Systems, Incorporated'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MicroservicesInfrastructure', u'Microservices Infrastructure Documentation',
u'Cisco Systems, Incorporated', 'MicroservicesInfrastructure', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'ansible': ('http://docs.ansible.com/', None),
}
# -- Options for todo ext ------------------------------------------------
todo_include_todos = os.getenv('INCLUDE_TODOS', '0') == '1' or version != release
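# Illustrative invocation (hypothetical paths): INCLUDE_TODOS=1 sphinx-build -b html . _build/html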
# -- setup ---------------------------------------------------------------
def setup(app):
from sphinx.util.texescape import tex_replacements
tex_replacements.extend([
(u'☐', u'[ ]'),
(u'☑', u'[x]'),
])
|
apache-2.0
| 8,536,454,077,919,035,000 | 1,594,469,363,914,350,600 | 31.027304 | 101 | 0.697144 | false |
drnextgis/QGIS
|
python/plugins/processing/core/parameters.py
|
1
|
55397
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Parameters.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import range
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
import os
import math
from inspect import isclass
from copy import deepcopy
import numbers
from qgis.utils import iface
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsRasterLayer, QgsVectorLayer, QgsMapLayer, QgsCoordinateReferenceSystem,
QgsExpressionContext, QgsExpressionContextUtils, QgsExpression, QgsExpressionContextScope)
from processing.tools.vector import resolveFieldIndex, features
from processing.tools import dataobjects
from processing.core.outputs import OutputNumber, OutputRaster, OutputVector
from processing.tools.dataobjects import getObject
def parseBool(s):
if s is None or s == str(None).lower():
return None
return str(s).lower() == str(True).lower()
def _splitParameterOptions(line):
tokens = line.split('=', 1)
if tokens[1].lower().strip().startswith('optional'):
isOptional = True
definition = tokens[1].strip()[len('optional') + 1:]
else:
isOptional = False
definition = tokens[1]
return isOptional, tokens[0], definition
def _createDescriptiveName(s):
return s.replace('_', ' ')
def _expressionContext():
context = QgsExpressionContext()
context.appendScope(QgsExpressionContextUtils.globalScope())
context.appendScope(QgsExpressionContextUtils.projectScope())
if iface.mapCanvas():
context.appendScope(QgsExpressionContextUtils.mapSettingsScope(iface.mapCanvas().mapSettings()))
processingScope = QgsExpressionContextScope()
extent = iface.mapCanvas().fullExtent()
processingScope.setVariable('fullextent_minx', extent.xMinimum())
processingScope.setVariable('fullextent_miny', extent.yMinimum())
processingScope.setVariable('fullextent_maxx', extent.xMaximum())
processingScope.setVariable('fullextent_maxy', extent.yMaximum())
context.appendScope(processingScope)
return context
def _resolveLayers(value):
layers = dataobjects.getAllLayers()
if value:
inputlayers = value.split(';')
for i, inputlayer in enumerate(inputlayers):
for layer in layers:
if layer.name() == inputlayer:
inputlayers[i] = layer.source()
break
return ";".join(inputlayers)
class Parameter(object):
"""
Base class for all parameters that a geoalgorithm might
take as input.
"""
default_metadata = {}
def __init__(self, name='', description='', default=None, optional=False,
metadata={}):
self.name = name
self.description = description
self.default = default
self.value = default
self.isAdvanced = False
# A hidden parameter can be used to set a hard-coded value.
# It can be used as any other parameter, but it will not be
# shown to the user
self.hidden = False
self.optional = parseBool(optional)
# TODO: make deep copy and deep update
self.metadata = deepcopy(self.default_metadata)
self.metadata.update(deepcopy(metadata))
def setValue(self, obj):
"""
Sets the value of the parameter.
Returns true if the value passed is correct for the type
of parameter.
"""
if obj is None:
if not self.optional:
return False
self.value = None
return True
self.value = str(obj)
return True
def setDefaultValue(self):
"""
Sets the value of the parameter to the default one
Returns true if the default value is correct for the type
of parameter.
"""
return self.setValue(self.default)
def __str__(self):
return u'{} <{}>'.format(self.name, self.__class__.__name__)
def getValueAsCommandLineParameter(self):
"""
Returns the value of this parameter as it should have been
entered in the console if calling an algorithm using the
Processing.runalg() method.
"""
return str(self.value)
def typeName(self):
return self.__class__.__name__.replace('Parameter', '').lower()
def todict(self):
o = deepcopy(self.__dict__)
del o['metadata']
return o
def tr(self, string, context=''):
if context == '':
context = 'Parameter'
return QCoreApplication.translate(context, string)
def wrapper(self, dialog, row=0, col=0):
wrapper = self.metadata.get('widget_wrapper', None)
# wrapper metadata should be a class path
if isinstance(wrapper, str):
tokens = wrapper.split('.')
mod = __import__('.'.join(tokens[:-1]), fromlist=[tokens[-1]])
wrapper = getattr(mod, tokens[-1])
# or directly a class object
if isclass(wrapper):
wrapper = wrapper(self, dialog, row, col)
# or a wrapper instance
return wrapper
def evaluate(self, alg):
pass
def evaluateForModeler(self, value, model):
return value
class ParameterBoolean(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.BooleanWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
Parameter.__init__(self, name, description, parseBool(default), optional, metadata)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, str):
self.value = str(value).lower() == str(True).lower()
else:
self.value = bool(value)
return True
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'boolean '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("boolean"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('boolean') + 1:]
if default:
param = ParameterBoolean(name, descName, default)
else:
param = ParameterBoolean(name, descName)
param.optional = isOptional
return param
class ParameterCrs(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.CrsWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
'''The value is a string that uniquely identifies the
coordinate reference system. Typically it is the auth id of the CRS
(if the authority is EPSG) or proj4 string of the CRS (in case
of other authorities or user defined projections).'''
Parameter.__init__(self, name, description, default, optional, metadata)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsCoordinateReferenceSystem):
self.value = value.authid()
return True
if isinstance(value, QgsMapLayer):
self.value = value.crs().authid()
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
self.value = layer.crs().authid()
return True
except:
pass
# TODO: check it is a valid authid
self.value = value
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'crs '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("crs"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('crs') + 1:]
if default:
return ParameterCrs(name, descName, default, isOptional)
else:
return ParameterCrs(name, descName, None, isOptional)
class ParameterDataObject(Parameter):
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
else:
s = dataobjects.normalizeLayerSource(str(self.value))
s = '"%s"' % s
return s
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterExtent(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExtentWidgetWrapper'
}
USE_MIN_COVERING_EXTENT = 'USE_MIN_COVERING_EXTENT'
def __init__(self, name='', description='', default=None, optional=True):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "xmin, xmax, ymin, ymax"
def setValue(self, value):
if not value:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsMapLayer):
rect = value.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
rect = layer.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
except:
pass
tokens = str(value).split(',')
if len(tokens) != 4:
return False
try:
float(tokens[0])
float(tokens[1])
float(tokens[2])
float(tokens[3])
self.value = value
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'extent'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("extent"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('extent') + 1:] or None
return ParameterExtent(name, descName, default, isOptional)
def evaluate(self, alg):
if self.optional and not bool(self.value):
self.value = self.getMinCoveringExtent(alg)
def getMinCoveringExtent(self, alg):
first = True
found = False
for param in alg.parameters:
if param.value:
if isinstance(param, (ParameterRaster, ParameterVector)):
if isinstance(param.value, (QgsRasterLayer,
QgsVectorLayer)):
layer = param.value
else:
layer = dataobjects.getObject(param.value)
if layer:
found = True
self.addToRegion(layer, first)
first = False
elif isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObject(layername)
if layer:
found = True
self.addToRegion(layer, first)
first = False
if found:
return '{},{},{},{}'.format(
self.xmin, self.xmax, self.ymin, self.ymax)
else:
return None
def addToRegion(self, layer, first):
if first:
self.xmin = layer.extent().xMinimum()
self.xmax = layer.extent().xMaximum()
self.ymin = layer.extent().yMinimum()
self.ymax = layer.extent().yMaximum()
else:
self.xmin = min(self.xmin, layer.extent().xMinimum())
self.xmax = max(self.xmax, layer.extent().xMaximum())
self.ymin = min(self.ymin, layer.extent().yMinimum())
self.ymax = max(self.ymax, layer.extent().yMaximum())
class ParameterPoint(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.PointWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "x, y"
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = str(text).split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'point'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterFile(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.FileWidgetWrapper'
}
def __init__(self, name='', description='', isFolder=False, optional=True, ext=None):
Parameter.__init__(self, name, description, None, parseBool(optional))
self.ext = ext
self.isFolder = parseBool(isFolder)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def setValue(self, obj):
if obj is None or obj.strip() == '':
if not self.optional:
return False
self.value = None if obj is None else obj.strip()
return True
if self.ext is not None and obj != '' and not obj.endswith(self.ext):
return False
self.value = str(obj)
return True
def typeName(self):
if self.isFolder:
return 'directory'
else:
return 'file'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.isFolder:
param_type += 'folder'
else:
param_type += 'file'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("file") or definition.startswith("folder"):
descName = _createDescriptiveName(name)
return ParameterFile(name, descName, definition.startswith("folder"), isOptional)
class ParameterFixedTable(Parameter):
def __init__(self, name='', description='', numRows=3,
cols=['value'], fixedNumOfRows=False, optional=False):
Parameter.__init__(self, name, description, None, optional)
self.cols = cols
if isinstance(cols, str):
self.cols = self.cols.split(";")
self.numRows = int(numRows)
self.fixedNumOfRows = parseBool(fixedNumOfRows)
def setValue(self, obj):
if obj is None:
if not self.optional:
return False
self.value = None
return True
# TODO: check that it contains a correct number of elements
if isinstance(obj, str):
self.value = obj
else:
self.value = ParameterFixedTable.tableToString(obj)
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
@staticmethod
def tableToString(table):
tablestring = ''
for i in range(len(table)):
for j in range(len(table[0])):
tablestring = tablestring + table[i][j] + ','
tablestring = tablestring[:-1]
return tablestring
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterMultipleInput(ParameterDataObject):
"""A parameter representing several data objects.
Its value is a string with substrings separated by semicolons,
each of which represents the data source location of each element.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.MultipleInputWidgetWrapper'
}
exported = None
def __init__(self, name='', description='', datatype=-1, optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.datatype = int(float(datatype))
self.exported = None
self.minNumInputs = 0
""" Set minimum required number of inputs for parameter
By default minimal number of inputs is set to 1
@type _minNumInputs: numeric type or None
@param _minNumInputs: required minimum number of inputs for parameter. \
If user will pass None as parameter, we will use default minimal number of inputs (1)
@return: result, if the minimum number of inputs were set.
"""
def setMinNumInputs(self, _minNumInputs):
if _minNumInputs is None:
self.minNumInputs = 0
return True
if _minNumInputs < 1 and not self.optional:
# don't allow to set negative or null number of inputs if parameter isn't optional
return False
self.minNumInputs = int(_minNumInputs)
return True
""" Get minimum required number of inputs for parameter
@return: minimum number of inputs required for this parameter
@see: setMinNumInputs()
"""
def getMinNumInputs(self):
return self.minNumInputs
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, list):
if len(obj) == 0:
if self.optional:
self.value = None
return True
else:
return False
# prevent setting value if we didn't provide required minimal number of inputs
elif len(obj) < self.minNumInputs:
return False
self.value = ";".join([self.getAsString(lay) for lay in obj])
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayers(self):
"""
Returns not the value entered by the user, but a string with
semicolon-separated filenames which contains the data of the
selected layers, but saved in a standard format (currently
shapefiles for vector layers and GeoTiff for raster) so that
they can be opened by most external applications.
If there is a selection and QGIS is configured to use just the
selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does no export at all and returns that value.
Currently, it works just for vector layer. In the case of
raster layers, it returns the parameter value.
The layers are exported just the first time the method is
called. The method can be called several times and it will
always return the same string, performing the export only the
first time.
"""
if self.exported:
return self.exported
self.exported = self.value
layers = self.value.split(';')
if layers is None or len(layers) == 0:
return self.value
if self.datatype == dataobjects.TYPE_RASTER:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportRasterLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
elif self.datatype == dataobjects.TYPE_FILE:
return self.value
else:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportVectorLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
def getAsString(self, value):
if self.datatype == dataobjects.TYPE_RASTER:
if isinstance(value, QgsRasterLayer):
return str(value.dataProvider().dataSourceUri())
else:
s = str(value)
layers = dataobjects.getRasterLayers()
for layer in layers:
if layer.name() == s:
return str(layer.dataProvider().dataSourceUri())
return s
if self.datatype == dataobjects.TYPE_FILE:
return str(value)
else:
if isinstance(value, QgsVectorLayer):
return str(value.source())
else:
s = str(value)
layers = dataobjects.getVectorLayers([self.datatype])
for layer in layers:
if layer.name() == s:
return str(layer.source())
return s
def getFileFilter(self):
if self.datatype == dataobjects.TYPE_RASTER:
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
elif self.datatype == dataobjects.TYPE_FILE:
return self.tr('All files (*.*)', 'ParameterMultipleInput')
else:
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterMultipleInput') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
if self.datatype == dataobjects.TYPE_VECTOR_POINT:
return 'points'
elif self.datatype == dataobjects.TYPE_VECTOR_LINE:
return 'lines'
elif self.datatype == dataobjects.TYPE_VECTOR_POLYGON:
return 'polygons'
elif self.datatype == dataobjects.TYPE_RASTER:
return 'rasters'
elif self.datatype == dataobjects.TYPE_FILE:
return 'files'
else:
return 'any vectors'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.datatype == dataobjects.TYPE_RASTER:
param_type += 'multiple raster'
if self.datatype == dataobjects.TYPE_FILE:
param_type += 'multiple file'
else:
param_type += 'multiple vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'multiple raster':
return ParameterMultipleInput(name, descName,
dataobjects.TYPE_RASTER, isOptional)
elif definition.lower().strip() == 'multiple vector':
            return ParameterMultipleInput(name, descName,
dataobjects.TYPE_VECTOR_ANY, isOptional)
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterNumber(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.NumberWidgetWrapper'
}
def __init__(self, name='', description='', minValue=None, maxValue=None,
default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
try:
self.default = int(str(default))
self.isInteger = True
except ValueError:
self.default = float(default)
self.isInteger = False
else:
self.isInteger = False
if minValue is not None:
self.min = int(float(minValue)) if self.isInteger else float(minValue)
else:
self.min = None
if maxValue is not None:
self.max = int(float(maxValue)) if self.isInteger else float(maxValue)
else:
self.max = None
self.value = self.default
def setValue(self, n):
if n is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(n, str):
try:
v = self._evaluate(n)
self.value = float(v)
if self.isInteger:
self.value = int(math.floor(self.value))
return True
except:
return False
else:
try:
if float(n) - int(float(n)) == 0:
value = int(float(n))
else:
value = float(n)
if self.min is not None:
if value < self.min:
return False
if self.max is not None:
if value > self.max:
return False
self.value = value
return True
except:
raise
return False
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'number'
code = '##' + self.name + '=' + param_type
if self.default:
code += str(self.default)
return code
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('number'):
default = definition.strip()[len('number'):] or None
if default == 'None':
default = None
return ParameterNumber(name, descName, default=default, optional=isOptional)
def _evaluate(self, value):
exp = QgsExpression(value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
if self.isInteger:
return math.floor(result)
else:
return result
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value):
self.value = self._evaluate(self.value)
def _layerVariables(self, element, alg=None):
variables = {}
layer = getObject(element.value)
if layer is not None:
name = element.name if alg is None else "%s_%s" % (alg.name, element.name)
variables['@%s_minx' % name] = layer.extent().xMinimum()
variables['@%s_miny' % name] = layer.extent().yMinimum()
            variables['@%s_maxx' % name] = layer.extent().xMaximum()
variables['@%s_maxy' % name] = layer.extent().yMaximum()
if isinstance(element, (ParameterRaster, OutputRaster)):
stats = layer.dataProvider().bandStatistics(1)
variables['@%s_avg' % name] = stats.mean
variables['@%s_stddev' % name] = stats.stdDev
variables['@%s_min' % name] = stats.minimumValue
variables['@%s_max' % name] = stats.maximumValue
return variables
def evaluateForModeler(self, value, model):
if isinstance(value, numbers.Number):
return value
variables = {}
for param in model.parameters:
if isinstance(param, ParameterNumber):
variables["@" + param.name] = param.value
if isinstance(param, (ParameterRaster, ParameterVector)):
variables.update(self._layerVariables(param))
for alg in list(model.algs.values()):
for out in alg.algorithm.outputs:
if isinstance(out, OutputNumber):
variables["@%s_%s" % (alg.name, out.name)] = out.value
if isinstance(out, (OutputRaster, OutputVector)):
variables.update(self._layerVariables(out, alg))
for k, v in list(variables.items()):
value = value.replace(k, str(v))
return value
def expressionContext(self):
return _expressionContext()
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
if isinstance(self.value, str):
return '"%s"' + self.value
return str(self.value)
class ParameterRange(Parameter):
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
values = default.split(',')
try:
int(values[0])
int(values[1])
self.isInteger = True
except:
self.isInteger = False
else:
self.isInteger = False
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = text.split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
class ParameterRaster(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.RasterWidgetWrapper'
}
def __init__(self, name='', description='', optional=False, showSublayersDialog=True):
ParameterDataObject.__init__(self, name, description, None, optional)
self.showSublayersDialog = parseBool(showSublayersDialog)
self.exported = None
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a geotiff file) so that it
can be opened by most external applications.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportRasterLayer(layer)
else:
self.exported = self.value
return self.exported
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsRasterLayer):
self.value = str(obj.dataProvider().dataSourceUri())
return True
else:
self.value = str(obj)
return True
def getFileFilter(self):
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterRaster') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'raster'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('raster'):
return ParameterRaster(name, descName, optional=isOptional)
class ParameterSelection(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.SelectionWidgetWrapper'
}
def __init__(self, name='', description='', options=[], default=None, isSource=False,
multiple=False, optional=False):
Parameter.__init__(self, name, description, default, optional)
self.multiple = multiple
isSource = parseBool(isSource)
self.options = options
if isSource:
self.options = []
layer = QgsVectorLayer(options[0], "layer", "ogr")
if layer.isValid():
try:
index = resolveFieldIndex(layer, options[1])
feats = features(layer)
for feature in feats:
self.options.append(str(feature.attributes()[index]))
except ValueError:
pass
elif isinstance(self.options, str):
self.options = self.options.split(";")
if default is not None:
try:
self.default = int(default)
except:
self.default = 0
self.value = self.default
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = 0
return True
if isinstance(value, list):
if not self.multiple:
return False
values = []
for v in value:
try:
n = int(v)
values.append(n)
except:
return False
if not self.optional and len(values) == 0:
return False
self.value = values
return True
else:
try:
n = int(value)
self.value = n
return True
except:
return False
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('selectionfromfile'):
options = definition.strip()[len('selectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True, optional=isOptional)
elif definition.lower().strip().startswith('selection'):
options = definition.strip()[len('selection '):].split(';')
return ParameterSelection(name, descName, options, optional=isOptional)
elif definition.lower().strip().startswith('multipleselectionfromfile'):
options = definition.strip()[len('multipleselectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True,
multiple=True, optional=isOptional)
elif definition.lower().strip().startswith('multipleselection'):
options = definition.strip()[len('multipleselection '):].split(';')
return ParameterSelection(name, descName, options, multiple=True, optional=isOptional)
class ParameterEvaluationException(Exception):
def __init__(self, param, msg):
        Exception.__init__(self, msg)
self.param = param
class ParameterString(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.StringWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, multiline=False,
optional=False, evaluateExpressions=False):
Parameter.__init__(self, name, description, default, optional)
self.multiline = parseBool(multiline)
self.evaluateExpressions = parseBool(evaluateExpressions)
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterString.NEWLINE,
ParameterString.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'string'
        return '##' + self.name + '=' + param_type + (self.default or '')
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('string'):
default = definition.strip()[len('string') + 1:]
if default:
return ParameterString(name, descName, default, optional=isOptional)
else:
return ParameterString(name, descName, optional=isOptional)
elif definition.lower().strip().startswith('longstring'):
default = definition.strip()[len('longstring') + 1:]
if default:
return ParameterString(name, descName, default, multiline=True, optional=isOptional)
else:
return ParameterString(name, descName, multiline=True, optional=isOptional)
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value) and self.evaluateExpressions:
exp = QgsExpression(self.value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
self.value = result
def expressionContext(self):
return _expressionContext()
class ParameterExpression(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExpressionWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, optional=False, parent_layer=None):
Parameter.__init__(self, name, description, default, optional)
self.parent_layer = parent_layer
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterExpression.NEWLINE,
ParameterExpression.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'expression'
        return '##' + self.name + '=' + param_type + (self.default or '')
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.lower().strip().startswith('expression'):
descName = _createDescriptiveName(name)
default = definition.strip()[len('expression') + 1:]
if default:
return ParameterExpression(name, descName, default, optional=isOptional)
else:
return ParameterExpression(name, descName, optional=isOptional)
class ParameterTable(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableWidgetWrapper'
}
def __init__(self, name='', description='', optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.exported = None
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
source = str(obj.source())
self.value = source
return True
else:
self.value = str(obj)
layers = dataobjects.getTables()
for layer in layers:
if layer.name() == self.value or layer.source() == self.value:
source = str(layer.source())
self.value = source
return True
val = str(obj)
self.value = val
return os.path.exists(self.value)
def getSafeExportedTable(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this table, but saved in
a standard format (currently always a DBF file) so that it can
be opened by most external applications.
Works only if the table represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a table in a suitable format,
it does not export at all and returns that value.
The table is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
table = dataobjects.getObjectFromUri(self.value, False)
if table:
self.exported = dataobjects.exportTable(table)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = ['csv', 'dbf']
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterTable') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'table'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('table'):
return ParameterTable(name, descName, isOptional)
class ParameterTableField(Parameter):
"""A parameter representing a table field.
Its value is a string that represents the name of the field.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableFieldWidgetWrapper'
}
DATA_TYPE_NUMBER = 0
DATA_TYPE_STRING = 1
DATA_TYPE_DATETIME = 2
DATA_TYPE_ANY = -1
def __init__(self, name='', description='', parent=None, datatype=-1,
optional=False, multiple=False):
Parameter.__init__(self, name, description, None, optional)
self.parent = parent
self.multiple = multiple
self.datatype = int(datatype)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, list):
if not self.multiple and len(value) > 1:
return False
self.value = ";".join(value)
return True
else:
self.value = str(value)
return True
def __str__(self):
return self.name + ' <' + self.__module__.split('.')[-1] + ' from ' \
+ self.parent + '>'
def dataType(self):
if self.datatype == self.DATA_TYPE_NUMBER:
return 'numeric'
elif self.datatype == self.DATA_TYPE_STRING:
return 'string'
elif self.datatype == self.DATA_TYPE_DATETIME:
return 'datetime'
else:
return 'any'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'field'
return '##' + self.name + '=' + param_type + self.parent
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('field'):
if definition.lower().strip().startswith('field number'):
parent = definition.strip()[len('field number') + 1:]
datatype = ParameterTableField.DATA_TYPE_NUMBER
elif definition.lower().strip().startswith('field string'):
parent = definition.strip()[len('field string') + 1:]
datatype = ParameterTableField.DATA_TYPE_STRING
elif definition.lower().strip().startswith('field datetime'):
parent = definition.strip()[len('field datetime') + 1:]
datatype = ParameterTableField.DATA_TYPE_DATETIME
else:
parent = definition.strip()[len('field') + 1:]
datatype = ParameterTableField.DATA_TYPE_ANY
return ParameterTableField(name, descName, parent, datatype, isOptional)
class ParameterVector(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.VectorWidgetWrapper'
}
def __init__(self, name='', description='', datatype=[-1],
optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
if isinstance(datatype, int):
datatype = [datatype]
elif isinstance(datatype, str):
datatype = [int(t) for t in datatype.split(',')]
self.datatype = datatype
self.exported = None
self.allowOnlyOpenedLayers = False
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
self.value = str(obj.source())
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a shapefile) so that it can
be opened by most external applications.
If there is a selection and QGIS is configured to use just the
        selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportVectorLayer(layer)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterVector') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
return dataobjects.vectorDataType(self)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'vector':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_ANY], isOptional)
elif definition.lower().strip() == 'vector point':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POINT], isOptional)
elif definition.lower().strip() == 'vector line':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_LINE], isOptional)
elif definition.lower().strip() == 'vector polygon':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POLYGON], isOptional)
class ParameterGeometryPredicate(Parameter):
predicates = ('intersects',
'contains',
'disjoint',
'equals',
'touches',
'overlaps',
'within',
'crosses')
def __init__(self, name='', description='', left=None, right=None,
optional=False, enabledPredicates=None):
Parameter.__init__(self, name, description, None, optional)
self.left = left
self.right = right
self.value = None
self.enabledPredicates = enabledPredicates
if self.enabledPredicates is None:
self.enabledPredicates = self.predicates
def getValueAsCommandLineParameter(self):
return str(self.value)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
elif len(value) == 0 and not self.optional:
return False
if isinstance(value, str):
self.value = value.split(';') # relates to ModelerAlgorithm.resolveValue
else:
self.value = value
return True
paramClasses = [c for c in list(sys.modules[__name__].__dict__.values()) if isclass(c) and issubclass(c, Parameter)]
def getParameterFromString(s):
# Try the parameter definitions used in description files
if '|' in s and (s.startswith("Parameter") or s.startswith("*Parameter")):
isAdvanced = False
if s.startswith("*"):
s = s[1:]
isAdvanced = True
tokens = s.split("|")
params = [t if str(t) != str(None) else None for t in tokens[1:]]
try:
clazz = getattr(sys.modules[__name__], tokens[0])
param = clazz(*params)
param.isAdvanced = isAdvanced
return param
except:
return None
else: # try script syntax
for paramClass in paramClasses:
try:
param = paramClass.fromScriptCode(s)
if param is not None:
return param
except AttributeError:
pass
except:
return None
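# Illustrative usage (hypothetical parameter names) covering both syntaxes
# understood by getParameterFromString():
#   getParameterFromString('ParameterNumber|ITERATIONS|Iterations|1|100|10')
#   getParameterFromString('##buffer_dist=optional number 500')
# The first form mirrors the '|'-separated description files; the second is
# the '##name=type' script syntax handled by each class' fromScriptCode().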
|
gpl-2.0
| -8,077,861,426,030,466,000 | -6,050,664,965,389,453,000 | 34.083597 | 116 | 0.573605 | false |
andrewk1/Climb-Bot
|
climb-bot.py
|
1
|
3083
|
import praw
import requests
import json
import time
import re
# Function iterates over each submission title and checks if the title contains route syntax that indicates the post is about a route
def parse_titles(bot, subreddit):
start_time = time.time()
for submission in subreddit.stream.submissions():
if (submission.created_utc < start_time):
continue
title = submission.title
# regex matches sequence of capitalized words followed by climb grade notation (V or 5.)
route_regex = '([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+) [( ]?(5.[0-9][0-9]?[A-Za-z]|[Vv][0-9][0-9]?)'
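        # e.g. titles like 'Moonlight Buttress 5.12d' or
        # 'Midnight Lightning (V8)' would match (hypothetical examples)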
        route_name = re.search(route_regex, title)
        # Skip submissions whose titles do not contain a recognizable route
        if route_name is None:
            continue
        print route_name
        comment = make_get_request(route_name.group(0))
if comment != 'NA':
submission.reply(comment)
# Call custom google search engine API to parse the formulated title and gather theCrag's metadata for the route
def make_get_request(route):
key = 'key=***'
cx = 'cx=***'
query= 'q='+route
google_url = 'https://www.googleapis.com/customsearch/v1?' + key + cx + query
response = requests.get(google_url)
parsed_response= json.loads(response.text)
return form_post(parsed_response)
# Extract data from google's JSON response and form a post
def form_post(parsed_response):
# Check if Google search received a hit
    if parsed_response['searchInformation']['totalResults'] == '0' or 'items' not in parsed_response:
return 'NA'
title = parsed_response['items'][0]['title']
print title
breadcrumb = parsed_response['items'][0]['pagemap']['breadcrumb']
count = 0
# Build up region string
region_string = ''
for key in breadcrumb:
region = breadcrumb[count]['title']
if (count > 0) :
region_string = region + ', ' + region_string
else :
region_string = region;
count+=1
metatags = parsed_response['items'][0]['pagemap']['metatags']
country = breadcrumb[0]['title']
latitude = metatags[0]['place:location:latitude']
longitude = metatags[0]['place:location:longitude']
google_pin = 'https://www.google.com/maps/@?api=1&map_action=map&basemap=satellite&zoom=19¢er=' + latitude + ',' + longitude
link = metatags[0]['og:url']
if (' in ' in title):
title = title[:title.index(' in ')]
# Truncate values to 3rd decimal place
lat_decimal = latitude.index('.')
latitude = latitude[:lat_decimal+4]
long_decimal = longitude.index('.')
longitude = longitude[:long_decimal+4]
# Format comment response
return 'I found a route! [' + title + '](' + link + ') in ' + region_string + '\n\nGPS Location: [' + latitude + ', ' + longitude + ']('+google_pin+')' + '\n\n ' + '\n\n^^^I ^^^am ^^^a ^^^bot ^^^| ^^^Data ^^^from ^^^[theCrag.com](https://www.thecrag.com/) ^^^| ^^^Feedback ^^^welcome ^^^at ^^^[r/climbBot](https://www.reddit.com/r/climbBot/)'
if __name__ == "__main__":
bot = praw.Reddit(
user_agent='climb-bot posts additional information on climbing routes it finds, created by /u/Akondrich, email: [email protected]',
client_id='***',
client_secret='***',
username='climb-bot',
password='***')
subreddit = bot.subreddit('climbBot')
parse_titles(bot, subreddit)
|
mit
| -5,139,018,071,634,323,000 | 4,490,931,005,581,357,600 | 38.525641 | 343 | 0.67337 | false |
UXE/local-edx
|
cms/envs/common.py
|
1
|
24184
|
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import
import imp
import os
import sys
import lms.envs.common
# Although this module itself may not use these imported variables, other dependent modules may.
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES, WIKI_ENABLED, MODULESTORE,
update_module_store_settings, ASSET_IGNORE_REGEX
)
from path import path
from warnings import simplefilter
from lms.lib.xblock.mixin import LmsBlockMixin
from dealer.git import git
from xmodule.modulestore.edit_info import EditInfoMixin
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the ones in lms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which enable embargoing for particular courses
'EMBARGO': False,
# Toggles the embargo site functionality, which enable embargoing for the whole site
'SITE_EMBARGOED': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Allow creating courses with non-ascii characters in the course id
'ALLOW_UNICODE_COURSE_ID': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Modulestore to use for new courses
'DEFAULT_STORE_FOR_NEW_COURSE': None,
}
ENABLE_JASMINE = False
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_cms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
from lms.envs.common import (
COURSE_KEY_PATTERN, COURSE_ID_PATTERN, USAGE_KEY_PATTERN, ASSET_KEY_PATTERN
)
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin, EditInfoMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ Modulestore Configuration ################################
MODULESTORE_BRANCH = 'draft-preferred'
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
SESSION_COOKIE_SECURE = False
# Site info
SITE_ID = 1
SITE_NAME = "localhost:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES_BIDI = lms.envs.common.LANGUAGES_BIDI
LANGUAGES = lms.envs.common.LANGUAGES
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
############################### Pipeline #######################################
STATICFILES_STORAGE = 'cms.lib.django_require.staticstorage.OptimizedCachedRequireJsStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'css/tinymce-studio-content-fonts.css',
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css',
'css/tinymce-studio-content.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-app-rtl': {
'source_filenames': [
'sass/style-app-rtl.css',
],
'output_filename': 'css/cms-style-app-rtl.css',
},
'style-app-extend1-rtl': {
'source_filenames': [
'sass/style-app-extend1-rtl.css',
],
'output_filename': 'css/cms-style-app-extend1-rtl.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
'style-xmodule-rtl': {
'source_filenames': [
'sass/style-xmodule-rtl.css',
],
'output_filename': 'css/cms-style-xmodule-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/cms-style-xmodule-annotations.css',
},
}
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc"
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# DJANGO-REQUIRE ###############################
# The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT.
REQUIRE_BASE_URL = "./"
# The name of a build profile to use for your project, relative to REQUIRE_BASE_URL.
# A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile.
# Set to False to disable running the default profile (e.g. if only using it to build Standalone
# Modules)
REQUIRE_BUILD_PROFILE = "build.js"
# The name of the require.js script used by your project, relative to REQUIRE_BASE_URL.
REQUIRE_JS = "js/vendor/require.js"
# A dictionary of standalone modules to build with almond.js.
REQUIRE_STANDALONE_MODULES = {}
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = False
# A tuple of files to exclude from the compilation result of r.js.
REQUIRE_EXCLUDE = ("build.txt",)
# The execution environment in which to run r.js: auto, node or rhino.
# auto will autodetect the environment and make use of node if available and rhino if not.
# It can also be a path to a custom class that subclasses require.environments.Environment and defines some "args" function that returns a list with the command arguments to execute.
REQUIRE_ENVIRONMENT = "node"
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'openedx.core.djangoapps.course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
'require',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'openedx.core.djangoapps.user_api',
'django_openid_auth',
'embargo',
# Monitoring signals
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 50000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Size of chunks into which asset uploads will be divided
UPLOAD_CHUNK_SIZE_IN_MB = 10
### Max size of asset uploads to GridFS
MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 10
# FAQ url to direct users to if they upload
# a file that exceeds the above size
MAX_ASSET_UPLOAD_FILE_SIZE_URL = ""
################ ADVANCED_COMPONENT_TYPES ###############
ADVANCED_COMPONENT_TYPES = [
'annotatable',
'textannotation', # module for annotating text (with annotation table)
'videoannotation', # module for annotating video (with annotation table)
'imageannotation', # module for annotating image (with annotation table)
'word_cloud',
'graphical_slider_tool',
'lti',
# XBlocks from pmitros repos are prototypes. They should not be used
# except for edX Learning Sciences experiments on edge.edx.org without
# further work to make them robust, maintainable, finalize data formats,
# etc.
'concept', # Concept mapper. See https://github.com/pmitros/ConceptXBlock
'done', # Lets students mark things as done. See https://github.com/pmitros/DoneXBlock
'audio', # Embed an audio file. See https://github.com/pmitros/AudioXBlock
'recommender', # Crowdsourced recommender. Prototype by dli&pmitros. Intended for roll-out in one place in one course.
'profile', # Prototype user profile XBlock. Used to test XBlock parameter passing. See https://github.com/pmitros/ProfileXBlock
'split_test',
'combinedopenended',
'peergrading',
'notes',
]
# Adding components in this list will disable the creation of new problems for those
# components in studio. Existing problems will work fine and one can edit them in studio
DEPRECATED_ADVANCED_COMPONENT_TYPES = []
# Specify xblocks that should be treated as advanced problems. Each entry is a tuple
# specifying the xblock name and an optional YAML template to be used.
ADVANCED_PROBLEM_TYPES = [
{
'component': 'openassessment',
'boilerplate_name': None,
}
]
|
agpl-3.0
| 8,943,935,576,679,348,000 | -3,109,093,182,218,091,500 | 30.530639 | 182 | 0.663248 | false |
yuxng/Deep_ISM
|
ISM/lib/setup.py
|
1
|
6351
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
"Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
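    # In short: .cu sources are compiled with nvcc using the 'nvcc' entry of
    # extra_compile_args, while everything else falls through to the regular
    # gcc flags; e.g. in ext_modules below, '-arch=sm_35' only applies to
    # nms_kernel.cu and compute_normals.cu, not to the .pyx-generated C/C++.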
    # tell the compiler it can process .cu files
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"utils.cython_nms",
["utils/nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension('normals.gpu_normals',
['normals/compute_normals.cu', 'normals/gpu_normals.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include'], '/usr/local/include/eigen3']
)
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
|
mit
| 1,538,584,670,687,865,000 | 9,025,732,268,405,828,000 | 37.490909 | 91 | 0.587781 | false |
bev-a-tron/pledgeservice
|
testlib/waitress/receiver.py
|
39
|
4849
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Data Chunk Receiver
"""
from waitress.utilities import find_double_newline
from waitress.utilities import BadRequest
class FixedStreamReceiver(object):
# See IStreamConsumer
completed = False
error = None
def __init__(self, cl, buf):
self.remain = cl
self.buf = buf
def __len__(self):
return self.buf.__len__()
def received(self, data):
'See IStreamConsumer'
rm = self.remain
if rm < 1:
self.completed = True # Avoid any chance of spinning
return 0
datalen = len(data)
if rm <= datalen:
self.buf.append(data[:rm])
self.remain = 0
self.completed = True
return rm
else:
self.buf.append(data)
self.remain -= datalen
return datalen
def getfile(self):
return self.buf.getfile()
def getbuf(self):
return self.buf
class ChunkedReceiver(object):
chunk_remainder = 0
control_line = b''
all_chunks_received = False
trailer = b''
completed = False
error = None
# max_control_line = 1024
# max_trailer = 65536
def __init__(self, buf):
self.buf = buf
def __len__(self):
return self.buf.__len__()
def received(self, s):
# Returns the number of bytes consumed.
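        # Illustrative input (not from the original tests): feeding
        #   b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'
        # appends b'Wiki' and b'pedia' to self.buf, then marks the body
        # complete once the empty trailer (blank line) is seen.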
if self.completed:
return 0
orig_size = len(s)
while s:
rm = self.chunk_remainder
if rm > 0:
# Receive the remainder of a chunk.
to_write = s[:rm]
self.buf.append(to_write)
written = len(to_write)
s = s[written:]
self.chunk_remainder -= written
elif not self.all_chunks_received:
# Receive a control line.
s = self.control_line + s
pos = s.find(b'\n')
if pos < 0:
# Control line not finished.
self.control_line = s
                    s = b''
else:
# Control line finished.
line = s[:pos]
s = s[pos + 1:]
self.control_line = b''
line = line.strip()
if line:
# Begin a new chunk.
semi = line.find(b';')
if semi >= 0:
# discard extension info.
line = line[:semi]
try:
sz = int(line.strip(), 16) # hexadecimal
except ValueError: # garbage in input
self.error = BadRequest(
'garbage in chunked encoding input')
sz = 0
if sz > 0:
# Start a new chunk.
self.chunk_remainder = sz
else:
# Finished chunks.
self.all_chunks_received = True
# else expect a control line.
else:
# Receive the trailer.
trailer = self.trailer + s
if trailer.startswith(b'\r\n'):
# No trailer.
self.completed = True
return orig_size - (len(trailer) - 2)
elif trailer.startswith(b'\n'):
# No trailer.
self.completed = True
return orig_size - (len(trailer) - 1)
pos = find_double_newline(trailer)
if pos < 0:
# Trailer not finished.
self.trailer = trailer
s = b''
else:
# Finished the trailer.
self.completed = True
self.trailer = trailer[:pos]
return orig_size - (len(trailer) - pos)
return orig_size
def getfile(self):
return self.buf.getfile()
def getbuf(self):
return self.buf
|
agpl-3.0
| 2,484,584,976,280,837,600 | 1,887,454,447,155,923,500 | 31.543624 | 78 | 0.453083 | false |
sethshill/final
|
build/lib.linux-armv7l-2.7/bibliopixel/animation.py
|
2
|
14970
|
import time
import log
from led import LEDMatrix
from led import LEDStrip
from led import LEDCircle
import colors
from util import d
import threading
class animThread(threading.Thread):
def __init__(self, anim, args):
super(animThread, self).__init__()
self.setDaemon(True)
self._anim = anim
self._args = args
def run(self):
log.debug("Starting thread...")
self._anim._run(**self._args)
log.debug("Thread Complete")
class BaseAnimation(object):
def __init__(self, led):
self._led = led
self.animComplete = False
self._step = 0
self._timeRef = 0
self._internalDelay = None
self._sleep = None
self._threaded = False
self._thread = None
self._callback = None
self._stopEvent = threading.Event()
self._stopEvent.clear()
self._led._threadedAnim = False
self._free_run = False
def _msTime(self):
return time.time() * 1000.0
def preRun(self, amt=1):
self._led.all_off()
def preStep(self, amt=1):
pass
def postStep(self, amt=1):
pass
def step(self, amt=1):
raise RuntimeError("Base class step() called. This shouldn't happen")
def stopThread(self, wait=False):
if self._thread:
self._stopEvent.set()
if wait:
self._thread.join()
def __enter__(self):
return self
def _exit(self, type, value, traceback):
pass
def __exit__(self, type, value, traceback):
self._exit(type, value, traceback)
self.stopThread(wait=True)
self._led.all_off()
self._led.update()
self._led.waitForUpdate()
def cleanup(self):
return self.__exit__(None, None, None)
def stopped(self):
return not (self._thread and self._thread.isAlive())
def _run(self, amt, fps, sleep, max_steps, untilComplete, max_cycles, seconds):
self.preRun()
# calculate sleep time base on desired Frames per Second
if fps:
sleep = int(1000 / fps)
if seconds is not None:
max_steps = int((seconds * 1000) / sleep)
initSleep = sleep
self._step = 0
cur_step = 0
cycle_count = 0
self.animComplete = False
while (not self._stopEvent.isSet() and
((max_steps == 0 and not untilComplete) or
(max_steps > 0 and cur_step < max_steps) or
(max_steps == 0 and untilComplete and not self.animComplete))):
self._timeRef = self._msTime()
start = self._msTime()
if hasattr(self, "_input_dev"):
self._keys = self._input_dev.getKeys()
self.preStep(amt)
self.step(amt)
self.postStep(amt)
mid = self._msTime()
if self._free_run:
sleep = None
elif self._internalDelay:
sleep = self._internalDelay
elif initSleep:
sleep = initSleep
self._sleep = sleep
self._led._frameGenTime = int(mid - start)
self._led._frameTotalTime = sleep
self._led.update()
now = self._msTime()
if self.animComplete and max_cycles > 0:
if cycle_count < max_cycles - 1:
cycle_count += 1
self.animComplete = False
stepTime = int(mid - start)
if self._led._threadedUpdate:
updateTime = int(self._led.lastThreadedUpdate())
totalTime = updateTime
else:
updateTime = int(now - mid)
totalTime = stepTime + updateTime
if self._led._threadedUpdate:
log.debug(
"Frame: %sms / Update Max: %sms", stepTime, updateTime)
else:
log.debug("%sms/%sfps / Frame: %sms / Update: %sms",
totalTime, int(1000 / max(totalTime, 1)), stepTime, updateTime)
if sleep:
diff = (self._msTime() - self._timeRef)
t = max(0, (sleep - diff) / 1000.0)
if t == 0:
log.warning(
"Frame-time of %dms set, but took %dms!", sleep, diff)
if self._threaded:
self._stopEvent.wait(t)
else:
time.sleep(t)
cur_step += 1
self._exit(None, None, None)
if self._callback:
self._callback(self)
def run(self, amt=1, fps=None, sleep=None, max_steps=0, untilComplete=False, max_cycles=0, threaded=False, joinThread=False, callback=None, seconds=None):
self._led._threadedAnim = self._threaded = threaded
if self._threaded:
self._stopEvent.clear()
self._callback = callback
if self._threaded:
args = {}
l = locals()
run_params = ["amt", "fps", "sleep",
"max_steps", "untilComplete", "max_cycles", "seconds"]
for p in run_params:
if p in l:
args[p] = l[p]
self._thread = animThread(self, args)
self._thread.start()
if joinThread:
self._thread.join()
else:
self._run(amt, fps, sleep, max_steps, untilComplete, max_cycles, seconds)
RUN_PARAMS = [{
"id": "amt",
"label": "Step Amount",
"type": "int",
"min": 1,
"default": 1,
"help": "Amount to step animation by on each frame. May not be used on some animations."
}, {
"id": "fps",
"label": "Framerate",
"type": "int",
"default": 15,
"min": 1,
"help": "Framerate at which to run animation."
}, {
"id": "seconds",
"label": "Run Seconds",
"type": "int",
"default": None,
"min": 0,
"help": "Number of seconds to run animation for, based on framerate."
}, {
"id": "max_steps",
"label": "Max Frames",
"type": "int",
"min": 0,
"default": 0,
"help": "Total frames to run before stopping."
}, {
"id": "untilComplete",
"label": "Until Complete",
"type": "bool",
"default": False,
"help": "Run until animation marks itself as complete. If supported."
}, {
"id": "max_cycles",
"label": "Max Cycles",
"type": "int",
"min": 1,
"default": 1,
"help": "If Until Complete is set, animation will repeat this many times."
}, ]
class OffAnim(BaseAnimation):
def __init__(self, led, timeout=10):
super(OffAnim, self).__init__(led)
self._internalDelay = timeout * 1000
def step(self, amt=1):
self._led.all_off()
class AnimationQueue(BaseAnimation):
def __init__(self, led, anims=None):
super(AnimationQueue, self).__init__(led)
self.anims = anims or []
self.curAnim = None
self.animIndex = 0
self._internalDelay = 0 # never wait
self.fps = None
self.untilComplete = False
# overriding to handle all the animations
def stopThread(self, wait=False):
for a, r in self.anims:
# a bit of a hack. they aren't threaded, but stops them anyway
a._stopEvent.set()
super(AnimationQueue, self).stopThread(wait)
def addAnim(self, anim, amt=1, fps=None, max_steps=0, untilComplete=False, max_cycles=0, seconds=None):
a = (
anim,
{
"amt": amt,
"fps": fps,
"max_steps": max_steps,
"untilComplete": untilComplete,
"max_cycles": max_cycles,
"seconds": seconds
}
)
self.anims.append(a)
def preRun(self, amt=1):
if len(self.anims) == 0:
raise Exception("Must provide at least one animation.")
self.animIndex = -1
def run(self, amt=1, fps=None, sleep=None, max_steps=0, untilComplete=False, max_cycles=0, threaded=False, joinThread=False, callback=None, seconds=None):
self.fps = fps
self.untilComplete = untilComplete
super(AnimationQueue, self).run(amt=1, fps=None, sleep=None, max_steps=0, untilComplete=untilComplete,
max_cycles=0, threaded=threaded, joinThread=joinThread, callback=callback, seconds=seconds)
def step(self, amt=1):
self.animIndex += 1
if self.animIndex >= len(self.anims):
if self.untilComplete:
self.animComplete = True
else:
self.animIndex = 0
if not self.animComplete:
self.curAnim = self.anims[self.animIndex]
anim, run = self.curAnim
run.update(threaded=False, joinThread=False, callback=None)
run['fps'] = run.get('fps') or self.fps
anim.run(**(run))
RUN_PARAMS = [{
"id": "fps",
"label": "Default Framerate",
"type": "int",
"default": None,
"min": 1,
"help": "Default framerate to run all animations in queue."
}, {
"id": "untilComplete",
"label": "Until Complete",
"type": "bool",
"default": False,
"help": "Run until animation marks itself as complete. If supported."
}]
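# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  It chains two
# animations in an AnimationQueue, each with its own run parameters; the
# ``led`` argument is assumed to be an already constructed LEDStrip instance.
def _demo_animation_queue(led):
    queue = AnimationQueue(led)
    queue.addAnim(StripChannelTest(led), fps=2, max_steps=8)
    queue.addAnim(OffAnim(led, timeout=1), max_steps=1)
    queue.run(untilComplete=True)
# ---------------------------------------------------------------------------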
class BaseStripAnim(BaseAnimation):
def __init__(self, led, start=0, end=-1):
super(BaseStripAnim, self).__init__(led)
if not isinstance(led, LEDStrip):
raise RuntimeError("Must use LEDStrip with Strip Animations!")
self._start = max(start, 0)
self._end = end
if self._end < 0 or self._end > self._led.lastIndex:
self._end = self._led.lastIndex
self._size = self._end - self._start + 1
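# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  A strip animation only
# has to implement step(); FPS pacing, threading and max_steps handling all
# come from BaseAnimation.run().  Construction of the LEDStrip and its driver
# is assumed to happen elsewhere, e.g.:
#     _ExampleChaser(led).run(fps=30, max_steps=120)
class _ExampleChaser(BaseStripAnim):
    def step(self, amt=1):
        self._led.all_off()
        self._led.set(self._start + (self._step % self._size), colors.Red)
        self._step += amt
# ---------------------------------------------------------------------------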
class BaseMatrixAnim(BaseAnimation):
def __init__(self, led, width=0, height=0, startX=0, startY=0):
super(BaseMatrixAnim, self).__init__(led)
if not isinstance(led, LEDMatrix):
raise RuntimeError("Must use LEDMatrix with Matrix Animations!")
self.width = width or led.width
self.height = height or led.height
self.startX = startX
self.startY = startY
class BaseGameAnim(BaseMatrixAnim):
def __init__(self, led, inputDev):
super(BaseGameAnim, self).__init__(led)
self._input_dev = inputDev
self._keys = None
self._lastKeys = None
self._speedStep = 0
self._speeds = {}
self._keyfuncs = {}
def _exit(self, type, value, traceback):
if hasattr(self._input_dev, 'setLightsOff'):
self._input_dev.setLightsOff(5)
self._input_dev.close()
def setSpeed(self, name, speed):
self._speeds[name] = speed
def getSpeed(self, name):
return self._speeds.get(name)
def _checkSpeed(self, speed):
return not (self._speedStep % speed)
def checkSpeed(self, name):
return name in self._speeds and self._checkSpeed(self._speeds[name])
def addKeyFunc(self, key, func, speed=1, hold=True):
if not isinstance(key, list):
key = [key]
for k in key:
self._keyfuncs[k] = d({
"func": func,
"speed": speed,
"hold": hold,
"last": False,
"inter": False
})
def handleKeys(self):
kf = self._keyfuncs
for key in self._keys:
val = self._keys[key]
if key in kf:
cfg = kf[key]
speedPass = self._checkSpeed(cfg.speed)
if cfg.hold:
if speedPass:
if (val or cfg.inter):
cfg.func()
else:
cfg.inter = cfg.last = val
elif speedPass:
if (val or cfg.inter) and not cfg.last:
cfg.func()
cfg.inter = cfg.last = val
else:
cfg.inter |= val
self._lastKeys = self._keys
def preStep(self, amt):
pass
def postStep(self, amt):
self._speedStep += 1
class BaseCircleAnim(BaseAnimation):
def __init__(self, led):
super(BaseCircleAnim, self).__init__(led)
if not isinstance(led, LEDCircle):
raise RuntimeError("Must use LEDCircle with Circle Animations!")
self.rings = led.rings
self.ringCount = led.ringCount
self.lastRing = led.lastRing
self.ringSteps = led.ringSteps
class StripChannelTest(BaseStripAnim):
def __init__(self, led):
super(StripChannelTest, self).__init__(led)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Blue, colors.White]
def step(self, amt=1):
self._led.set(0, colors.Red)
self._led.set(1, colors.Green)
self._led.set(2, colors.Green)
self._led.set(3, colors.Blue)
self._led.set(4, colors.Blue)
self._led.set(5, colors.Blue)
color = self._step % 4
self._led.fill(self.colors[color], 7, 9)
self._step += 1
class MatrixChannelTest(BaseMatrixAnim):
def __init__(self, led):
super(MatrixChannelTest, self).__init__(led, 0, 0)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Blue, colors.White]
def step(self, amt=1):
self._led.drawLine(0, 0, 0, self.height - 1, colors.Red)
self._led.drawLine(1, 0, 1, self.height - 1, colors.Green)
self._led.drawLine(2, 0, 2, self.height - 1, colors.Green)
self._led.drawLine(3, 0, 3, self.height - 1, colors.Blue)
self._led.drawLine(4, 0, 4, self.height - 1, colors.Blue)
self._led.drawLine(5, 0, 5, self.height - 1, colors.Blue)
color = self._step % 4
self._led.fillRect(7, 0, 3, self.height, self.colors[color])
self._step += 1
class MatrixCalibrationTest(BaseMatrixAnim):
def __init__(self, led):
super(MatrixCalibrationTest, self).__init__(led, 0, 0)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Green,
colors.Blue, colors.Blue, colors.Blue]
def step(self, amt=1):
self._led.all_off()
i = self._step % self.width
for x in range(i + 1):
c = self.colors[x % len(self.colors)]
self._led.drawLine(x, 0, x, i, c)
self.animComplete = (i == (self.width - 1))
self._step += 1
|
mit
| 8,796,675,899,046,583,000 | -7,779,853,745,855,515,000 | 29.120724 | 158 | 0.52191 | false |
protatremy/buildbot
|
master/buildbot/test/unit/test_scripts_upgrade_master.py
|
10
|
8417
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import mock
from twisted.internet import defer
from twisted.python.compat import NativeStringIO
from twisted.trial import unittest
from buildbot import config as config_module
from buildbot.db import connector
from buildbot.db import masters
from buildbot.db import model
from buildbot.scripts import base
from buildbot.scripts import upgrade_master
from buildbot.test.util import dirs
from buildbot.test.util import misc
from buildbot.test.util import www
def mkconfig(**kwargs):
config = dict(quiet=False, replace=False, basedir='test')
config.update(kwargs)
return config
class TestUpgradeMaster(dirs.DirsMixin, misc.StdoutAssertionsMixin,
unittest.TestCase):
def setUp(self):
        # upgradeMaster is decorated with @in_reactor, so strip that decoration
# since the master is already running
self.patch(upgrade_master, 'upgradeMaster',
upgrade_master.upgradeMaster._orig)
self.setUpDirs('test')
self.setUpStdoutAssertions()
def patchFunctions(self, basedirOk=True, configOk=True):
self.calls = []
def checkBasedir(config):
self.calls.append('checkBasedir')
return basedirOk
self.patch(base, 'checkBasedir', checkBasedir)
def loadConfig(config, configFileName='master.cfg'):
self.calls.append('loadConfig')
return config_module.MasterConfig() if configOk else False
self.patch(base, 'loadConfig', loadConfig)
def upgradeFiles(config):
self.calls.append('upgradeFiles')
self.patch(upgrade_master, 'upgradeFiles', upgradeFiles)
def upgradeDatabase(config, master_cfg):
self.assertIsInstance(master_cfg, config_module.MasterConfig)
self.calls.append('upgradeDatabase')
self.patch(upgrade_master, 'upgradeDatabase', upgradeDatabase)
# tests
def test_upgradeMaster_success(self):
self.patchFunctions()
d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 0)
self.assertInStdout('upgrade complete')
return d
def test_upgradeMaster_quiet(self):
self.patchFunctions()
d = upgrade_master.upgradeMaster(mkconfig(quiet=True), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 0)
self.assertWasQuiet()
return d
def test_upgradeMaster_bad_basedir(self):
self.patchFunctions(basedirOk=False)
d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 1)
return d
def test_upgradeMaster_bad_config(self):
self.patchFunctions(configOk=False)
d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 1)
return d
class TestUpgradeMasterFunctions(www.WwwTestMixin, dirs.DirsMixin,
misc.StdoutAssertionsMixin, unittest.TestCase):
def setUp(self):
self.setUpDirs('test')
self.basedir = os.path.abspath(os.path.join('test', 'basedir'))
self.setUpStdoutAssertions()
def tearDown(self):
self.tearDownDirs()
def writeFile(self, path, contents):
with open(path, 'wt') as f:
f.write(contents)
def readFile(self, path):
with open(path, 'rt') as f:
return f.read()
# tests
def test_installFile(self):
self.writeFile('test/srcfile', 'source data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
self.assertEqual(self.readFile('test/destfile'), 'source data')
self.assertInStdout('creating test/destfile')
def test_installFile_existing_differing(self):
self.writeFile('test/srcfile', 'source data')
self.writeFile('test/destfile', 'dest data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
self.assertEqual(self.readFile('test/destfile'), 'dest data')
self.assertEqual(self.readFile('test/destfile.new'), 'source data')
self.assertInStdout('writing new contents to')
def test_installFile_existing_differing_overwrite(self):
self.writeFile('test/srcfile', 'source data')
self.writeFile('test/destfile', 'dest data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile',
overwrite=True)
self.assertEqual(self.readFile('test/destfile'), 'source data')
self.assertFalse(os.path.exists('test/destfile.new'))
self.assertInStdout('overwriting')
def test_installFile_existing_same(self):
self.writeFile('test/srcfile', 'source data')
self.writeFile('test/destfile', 'source data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
self.assertEqual(self.readFile('test/destfile'), 'source data')
self.assertFalse(os.path.exists('test/destfile.new'))
self.assertWasQuiet()
def test_installFile_quiet(self):
self.writeFile('test/srcfile', 'source data')
upgrade_master.installFile(mkconfig(quiet=True), 'test/destfile',
'test/srcfile')
self.assertWasQuiet()
def test_upgradeFiles(self):
upgrade_master.upgradeFiles(mkconfig())
for f in [
'test/master.cfg.sample',
]:
self.assertTrue(os.path.exists(f), "%s not found" % f)
self.assertInStdout('upgrading basedir')
def test_upgradeFiles_notice_about_unused_public_html(self):
os.mkdir('test/public_html')
self.writeFile('test/public_html/index.html', 'INDEX')
upgrade_master.upgradeFiles(mkconfig())
self.assertInStdout('public_html is not used')
@defer.inlineCallbacks
def test_upgradeDatabase(self):
setup = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
self.patch(connector.DBConnector, 'setup', setup)
upgrade = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
self.patch(model.Model, 'upgrade', upgrade)
setAllMastersActiveLongTimeAgo = mock.Mock(
side_effect=lambda **kwargs: defer.succeed(None))
self.patch(masters.MastersConnectorComponent,
'setAllMastersActiveLongTimeAgo', setAllMastersActiveLongTimeAgo)
yield upgrade_master.upgradeDatabase(
mkconfig(basedir='test', quiet=True),
config_module.MasterConfig())
        setup.assert_called_with(check_version=False, verbose=False)
upgrade.assert_called_with()
self.assertWasQuiet()
@defer.inlineCallbacks
def test_upgradeDatabaseFail(self):
setup = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
self.patch(connector.DBConnector, 'setup', setup)
self.patch(sys, 'stderr', NativeStringIO())
upgrade = mock.Mock(
side_effect=lambda **kwargs: defer.fail(Exception("o noz")))
self.patch(model.Model, 'upgrade', upgrade)
ret = yield upgrade_master._upgradeMaster(
mkconfig(basedir='test', quiet=True),
config_module.MasterConfig())
self.assertEqual(ret, 1)
self.assertIn("problem while upgrading!:\nTraceback (most recent call last):\n",
sys.stderr.getvalue())
self.assertIn("o noz", sys.stderr.getvalue())
|
gpl-2.0
| 4,864,781,312,834,571,000 | -4,615,349,125,890,004,000 | 36.914414 | 88 | 0.659736 | false |
tsl143/addons-server
|
src/olympia/zadmin/management/commands/addusertogroup.py
|
2
|
1528
|
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
import olympia.core.logger
from olympia.access.models import Group, GroupUser
from olympia.users.models import UserProfile
class Command(BaseCommand):
help = 'Add a new user to a group.'
log = olympia.core.logger.getLogger('z.users')
def add_arguments(self, parser):
parser.add_argument('user', type=unicode, help='User id or email')
parser.add_argument('group_id', type=int, help='Group id')
def handle(self, *args, **options):
do_adduser(options['user'], options['group_id'])
msg = 'Adding {user} to {group}\n'.format(
user=options['user'], group=options['group_id'])
self.log.info(msg)
self.stdout.write(msg)
def do_adduser(user, group):
try:
if '@' in user:
user = UserProfile.objects.get(email=user)
elif user.isdigit():
user = UserProfile.objects.get(pk=user)
else:
raise CommandError('Unknown input for user.')
group = Group.objects.get(pk=group)
GroupUser.objects.create(user=user, group=group)
except IntegrityError, e:
raise CommandError('User is already in that group? %s' % e)
except UserProfile.DoesNotExist:
raise CommandError('User ({user}) does not exist.'.format(user=user))
except Group.DoesNotExist:
raise CommandError('Group ({group}) does not exist.'
.format(group=group))
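# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  The command accepts a
# user id or an email plus a numeric group id; the concrete values below are
# assumptions for illustration only:
#     ./manage.py addusertogroup admin@example.com 1
#     ./manage.py addusertogroup 42 1
# or, from Python once Django is configured, do_adduser('admin@example.com', 1).
# ---------------------------------------------------------------------------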
|
bsd-3-clause
| -346,028,863,727,366,140 | 7,624,842,753,980,745,000 | 32.217391 | 77 | 0.643325 | false |
gnowxilef/youtube-dl
|
youtube_dl/extractor/tmz.py
|
65
|
2138
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TMZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'http://www.tmz.com/videos/0_okj015ty/',
'md5': '4d22a51ef205b6c06395d8394f72d560',
'info_dict': {
'id': '0_okj015ty',
'ext': 'mp4',
'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!',
'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie??? Or is she just showing off her amazing boobs?',
'timestamp': 1394747163,
'uploader_id': 'batchUser',
'upload_date': '20140313',
}
}, {
'url': 'http://www.tmz.com/videos/0-cegprt2p/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url).replace('-', '_')
return self.url_result('kaltura:591531:%s' % video_id, 'Kaltura', video_id)
class TMZArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tmz\.com/\d{4}/\d{2}/\d{2}/(?P<id>[^/]+)/?'
_TEST = {
'url': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert',
'md5': '3316ff838ae5bb7f642537825e1e90d2',
'info_dict': {
'id': '0_6snoelag',
'ext': 'mov',
'title': 'Bobby Brown Tells Crowd ... Bobbi Kristina is Awake',
'description': 'Bobby Brown stunned his audience during a concert Saturday night, when he told the crowd, "Bobbi is awake. She\'s watching me."',
'timestamp': 1429467813,
'upload_date': '20150419',
'uploader_id': 'batchUser',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
embedded_video_info = self._parse_json(self._html_search_regex(
r'tmzVideoEmbed\(({.+?})\);', webpage, 'embedded video info'),
video_id)
return self.url_result(
'http://www.tmz.com/videos/%s/' % embedded_video_info['id'])
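# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  Extractors are not
# called directly; youtube-dl dispatches on the URL.  The options dict is an
# assumption:
#     import youtube_dl
#     ydl = youtube_dl.YoutubeDL({'quiet': True})
#     info = ydl.extract_info('http://www.tmz.com/videos/0_okj015ty/',
#                             download=False)
# ---------------------------------------------------------------------------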
|
unlicense
| 6,802,249,694,943,736,000 | 2,537,934,449,957,031,000 | 37.178571 | 158 | 0.556127 | false |
BaichuanWu/Blog_on_django
|
site-packages/django/contrib/gis/tests/distapp/tests.py
|
54
|
19622
|
from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, mysql, oracle, postgis, spatialite, no_oracle, no_spatialite
)
from django.test import TestCase
if HAS_GEOS and HAS_SPATIAL_DB:
from django.contrib.gis.geos import GEOSGeometry, LineString
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB and not mysql,
"Geos and spatial db (not mysql) are required.")
class DistanceTest(TestCase):
if HAS_GEOS and HAS_SPATIAL_DB:
# A point we are testing distances with -- using a WGS84
# coordinate that'll be implicitly transformed to that to
# the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test_init(self):
"""
Test initialization of distance models.
"""
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test_dwithin(self):
"""
Test the `dwithin` lookup type.
"""
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
# Performing distance queries on two projected coordinate systems one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple):
dist1, dist2 = dist
else:
dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle:
type_error = True
else:
type_error = False
if isinstance(dist, tuple):
if oracle:
dist = dist[1]
else:
dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test_distance_projected(self):
"""
Test the `distance` GeoQuerySet method on projected coordinate systems.
"""
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
# Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
tol = 2 if oracle else 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
def test_distance_geodetic(self):
"""
Test the `distance` GeoQuerySet method on geodetic coordinate systems.
"""
tol = 2 if oracle else 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)))
if oracle or connection.ops.geography:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
else:
            # PostGIS 1.4 and below are limited to distance queries only
# to/from point geometries, check for raising of ValueError.
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls)
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls.wkt)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test_distance_transform(self):
"""
Test the `distance` GeoQuerySet method used with `transform` on a geographic field.
"""
# Normally you can't compute distances from a geometry field
# that is not a PointField (on PostGIS 1.4 and below).
if not connection.ops.geography:
self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
self.assertListEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test_distance_lookups(self):
"""
Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types.
"""
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
        # (thus, Downtown Houston and Southside Place will be excluded as tested in
        # `test_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test_geodetic_distance_lookups(self):
"""
Test distance lookups on geodetic coordinate systems.
"""
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or connection.ops.geography:
# Oracle and PostGIS 1.5 can do distance lookups on arbitrary geometries.
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
# PostGIS 1.4 and below only allows geodetic distance queries (utilizing
# ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
# on geometry columns.
self.assertRaises(ValueError, dist_qs.count)
# Ensured that a ValueError was raised, none of the rest of the test is
# support on this backend, so bail now.
if spatialite:
return
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test_area(self):
"""
Test the `area` GeoQuerySet method.
"""
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle and differences
# with GEOS 3.0.0RC4
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test_length(self):
"""
Test the `length` GeoQuerySet method.
"""
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
tol = 2 if oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test_perimeter(self):
"""
Test the `perimeter` GeoQuerySet method.
"""
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
tol = 2 if oracle else 7
for i, z in enumerate(SouthTexasZipcode.objects.perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
def test_measurement_null_fields(self):
"""
Test the measurement GeoQuerySet methods on fields with NULL values.
"""
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertIsNone(z.distance)
self.assertIsNone(z.area)
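# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream test module.  The D alias used
# throughout converts between units via attribute access, e.g.:
#     D(km=7).m        # -> 7000.0
#     D(mi=4.349).km   # -> approximately 7.0
#     D(yd=19500).km   # -> approximately 17.8
# ---------------------------------------------------------------------------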
|
mit
| 8,791,254,688,426,689,000 | -1,086,606,618,580,495,400 | 48.928753 | 196 | 0.625522 | false |
ivanvladimir/gensim
|
gensim/corpora/ucicorpus.py
|
68
|
7517
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
University of California, Irvine (UCI) Bag-of-Words format.
http://archive.ics.uci.edu/ml/datasets/Bag+of+Words
"""
from __future__ import with_statement
import logging
from collections import defaultdict
from gensim import utils
from gensim.corpora import Dictionary
from gensim.corpora import IndexedCorpus
from gensim.matutils import MmReader
from gensim.matutils import MmWriter
from six import iteritems, string_types
from six.moves import xrange
logger = logging.getLogger('gensim.corpora.ucicorpus')
class UciReader(MmReader):
def __init__(self, input):
"""
Initialize the reader.
The `input` parameter refers to a file on the local filesystem,
which is expected to be in the UCI Bag-of-Words format.
"""
logger.info('Initializing corpus reader from %s' % input)
self.input = input
with utils.smart_open(self.input) as fin:
self.num_docs = self.num_terms = self.num_nnz = 0
try:
self.num_docs = int(next(fin).strip())
self.num_terms = int(next(fin).strip())
self.num_nnz = int(next(fin).strip())
except StopIteration:
pass
logger.info('accepted corpus with %i documents, %i features, %i non-zero entries' %
(self.num_docs, self.num_terms, self.num_nnz))
def skip_headers(self, input_file):
for lineno, _ in enumerate(input_file):
if lineno == 2:
break
# endclass UciReader
class UciWriter(MmWriter):
"""
Store a corpus in UCI Bag-of-Words format.
This corpus format is identical to MM format, except for
different file headers. There is no format line, and the first
three lines of the file contain number_docs, num_terms, and num_nnz,
one value per line.
This implementation is based on matutils.MmWriter, and works the same way.
"""
MAX_HEADER_LENGTH = 20 # reserve 20 bytes per header value
FAKE_HEADER = utils.to_utf8(' ' * MAX_HEADER_LENGTH + '\n')
def write_headers(self):
"""
Write blank header lines. Will be updated later, once corpus stats are known.
"""
for _ in range(3):
self.fout.write(self.FAKE_HEADER)
self.last_docno = -1
self.headers_written = True
def update_headers(self, num_docs, num_terms, num_nnz):
"""
Update headers with actual values.
"""
offset = 0
values = [utils.to_utf8(str(n)) for n in [num_docs, num_terms, num_nnz]]
for value in values:
if len(value) > len(self.FAKE_HEADER):
raise ValueError('Invalid header: value too large!')
self.fout.seek(offset)
self.fout.write(value)
offset += len(self.FAKE_HEADER)
@staticmethod
def write_corpus(fname, corpus, progress_cnt=1000, index=False):
writer = UciWriter(fname)
writer.write_headers()
num_terms, num_nnz = 0, 0
docno, poslast = -1, -1
offsets = []
for docno, bow in enumerate(corpus):
if docno % progress_cnt == 0:
logger.info("PROGRESS: saving document #%i" % docno)
if index:
posnow = writer.fout.tell()
if posnow == poslast:
offsets[-1] = -1
offsets.append(posnow)
poslast = posnow
vector = [(x, int(y)) for (x, y) in bow if int(y) != 0] # integer count, not floating weights
max_id, veclen = writer.write_vector(docno, vector)
num_terms = max(num_terms, 1 + max_id)
num_nnz += veclen
num_docs = docno + 1
if num_docs * num_terms != 0:
logger.info("saved %ix%i matrix, density=%.3f%% (%i/%i)" %
(num_docs, num_terms,
100.0 * num_nnz / (num_docs * num_terms),
num_nnz,
num_docs * num_terms))
# now write proper headers, by seeking and overwriting the spaces written earlier
writer.update_headers(num_docs, num_terms, num_nnz)
writer.close()
if index:
return offsets
# endclass UciWriter
class UciCorpus(UciReader, IndexedCorpus):
"""
Corpus in the UCI bag-of-words format.
"""
def __init__(self, fname, fname_vocab=None):
IndexedCorpus.__init__(self, fname)
UciReader.__init__(self, fname)
if fname_vocab is None:
fname_vocab = utils.smart_extension(fname, '.vocab')
self.fname = fname
with utils.smart_open(fname_vocab) as fin:
words = [word.strip() for word in fin]
self.id2word = dict(enumerate(words))
self.transposed = True
def __iter__(self):
"""
Interpret a matrix in UCI bag-of-words format as a streamed gensim corpus
(yielding one document at a time).
"""
for docId, doc in super(UciCorpus, self).__iter__():
yield doc # get rid of docId, return the sparse vector only
def create_dictionary(self):
"""
Utility method to generate gensim-style Dictionary directly from
the corpus and vocabulary data.
"""
dictionary = Dictionary()
# replace dfs with defaultdict to avoid downstream KeyErrors
# uci vocabularies may contain terms that are not used in the document data
dictionary.dfs = defaultdict(int)
dictionary.id2token = self.id2word
dictionary.token2id = dict((v, k) for k, v in iteritems(self.id2word))
dictionary.num_docs = self.num_docs
dictionary.num_nnz = self.num_nnz
for docno, doc in enumerate(self):
if docno % 10000 == 0:
logger.info('PROGRESS: processing document %i of %i' % (docno, self.num_docs))
for word, count in doc:
dictionary.dfs[word] += 1
dictionary.num_pos += count
return dictionary
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=10000, metadata=False):
"""
Save a corpus in the UCI Bag-of-Words format.
There are actually two files saved: `fname` and `fname.vocab`, where
`fname.vocab` is the vocabulary file.
This function is automatically called by `UciCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
else:
            num_terms = 1 + max([-1] + list(id2word.keys()))
# write out vocabulary
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s" % (num_terms, fname_vocab))
with utils.smart_open(fname_vocab, 'wb') as fout:
for featureid in xrange(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
logger.info("storing corpus in UCI Bag-of-Words format: %s" % fname)
return UciWriter.write_corpus(fname, corpus, index=True, progress_cnt=progress_cnt)
# endclass UciCorpus
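# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  A typical round trip,
# assuming UCI-format files docword.txt / vocab.txt exist on disk:
#     corpus = UciCorpus('docword.txt', 'vocab.txt')
#     dictionary = corpus.create_dictionary()
#     UciCorpus.serialize('roundtrip.uci', corpus, id2word=corpus.id2word)
# ---------------------------------------------------------------------------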
|
gpl-3.0
| -1,743,201,336,652,776,700 | -4,554,669,092,255,808,500 | 32.261062 | 105 | 0.594253 | false |
kuiche/chromium
|
tools/grit/grit/tool/count.py
|
7
|
1024
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Count number of occurrences of a given message ID
'''
import getopt
import os
import types
from grit.tool import interface
from grit import grd_reader
from grit import util
from grit.extern import tclib
class CountMessage(interface.Tool):
'''Count the number of times a given message ID is used.
'''
def __init__(self):
pass
def ShortDescription(self):
    return 'Counts the number of times a given message ID is used.'
def Run(self, opts, args):
self.SetOptions(opts)
id = args[0]
res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
res_tree.OnlyTheseTranslations([])
res_tree.RunGatherers(True)
count = 0
for c in res_tree.UberClique().AllCliques():
if c.GetId() == id:
count += 1
print "There are %d occurrences of message %s." % (count, id)
|
bsd-3-clause
| -1,549,491,880,249,694,700 | 8,509,771,156,139,143,000 | 22.272727 | 72 | 0.694336 | false |
gamechanger/kafka-python
|
kafka/protocol/admin.py
|
1
|
1182
|
from .struct import Struct
from .types import Array, Bytes, Int16, Schema, String
class ListGroupsResponse(Struct):
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest(Struct):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse
SCHEMA = Schema()
class DescribeGroupsResponse(Struct):
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest(Struct):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
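# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  Requests are plain
# Struct instances built from their SCHEMA fields; the group name below is an
# assumption:
#     req = DescribeGroupsRequest(groups=['my-consumer-group'])
#     payload = req.encode()           # wire bytes for API key 15, version 0
#     DescribeGroupsRequest.RESPONSE_TYPE is DescribeGroupsResponse  # -> True
# ---------------------------------------------------------------------------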
|
apache-2.0
| 8,024,654,474,325,048,000 | 7,448,444,360,400,129,000 | 25.863636 | 54 | 0.526227 | false |
CooperLuan/airflow
|
airflow/operators/hive_to_mysql.py
|
29
|
2324
|
import logging
from airflow.hooks import HiveServer2Hook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class HiveToMySqlTransfer(BaseOperator):
"""
    Moves data from Hive to MySQL. Note that for now the data is loaded
    into memory before being pushed to MySQL, so this operator should
    be used for smallish amounts of data.
    :param sql: SQL query to execute against the Hive server
:type sql: str
:param mysql_table: target MySQL table, use dot notation to target a
specific database
:type mysql_table: str
:param mysql_conn_id: source mysql connection
:type mysql_conn_id: str
:param hiveserver2_conn_id: destination hive connection
:type hiveserver2_conn_id: str
:param mysql_preoperator: sql statement to run against mysql prior to
        import, typically used to truncate or delete in place of the data
coming in, allowing the task to be idempotent (running the task
twice won't double load data)
:type mysql_preoperator: str
"""
template_fields = ('sql', 'mysql_table', 'mysql_preoperator')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
mysql_table,
hiveserver2_conn_id='hiveserver2_default',
mysql_conn_id='mysql_default',
mysql_preoperator=None,
*args, **kwargs):
super(HiveToMySqlTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.hiveserver2_conn_id = hiveserver2_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
logging.info("Extracting data from Hive")
logging.info(self.sql)
results = hive.get_records(self.sql)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.mysql_preoperator:
logging.info("Running MySQL preoperator")
logging.info(self.mysql_preoperator)
mysql.run(self.mysql_preoperator)
logging.info("Inserting rows into MySQL")
mysql.insert_rows(table=self.mysql_table, rows=results)
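# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  Typical use inside a
# DAG definition; the connection ids, query and table names are assumptions:
#     from airflow import DAG
#     from datetime import datetime
#     dag = DAG('hive_to_mysql_example', start_date=datetime(2015, 1, 1))
#     load = HiveToMySqlTransfer(
#         task_id='load_daily_summary',
#         sql='SELECT ds, count(*) FROM events GROUP BY ds',
#         mysql_table='analytics.daily_summary',
#         mysql_preoperator='TRUNCATE TABLE analytics.daily_summary',
#         dag=dag)
# ---------------------------------------------------------------------------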
|
apache-2.0
| 482,199,594,340,346,800 | 8,964,241,742,591,737,000 | 35.888889 | 76 | 0.66222 | false |
sam-m888/gramps
|
gramps/gen/filters/rules/media/_hasnotematchingsubstringof.py
|
5
|
1776
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "Media having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteMatchingSubstringOf(HasNoteSubstrBase):
"""Media having notes containing <substring>"""
name = _('Media objects having notes containing <substring>')
description = _("Matches media objects whose notes contain text "
"matching a substring")
|
gpl-2.0
| -8,903,532,954,846,227,000 | 587,578,695,529,138,300 | 38.466667 | 79 | 0.555743 | false |
pkilambi/python-gnocchiclient
|
gnocchiclient/openstack/common/version.py
|
15
|
5885
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for consuming the auto-generated versioninfo files.
"""
import datetime
import pkg_resources
import setup
class _deferred_version_string(object):
"""Internal helper class which provides delayed version calculation."""
def __init__(self, version_info, prefix):
self.version_info = version_info
self.prefix = prefix
def __str__(self):
return "%s%s" % (self.prefix, self.version_info.version_string())
def __repr__(self):
return "%s%s" % (self.prefix, self.version_info.version_string())
class VersionInfo(object):
def __init__(self, package, python_package=None, pre_version=None):
"""Object that understands versioning for a package
:param package: name of the top level python namespace. For glance,
this would be "glance" for python-glanceclient, it
would be "glanceclient"
:param python_package: optional name of the project name. For
glance this can be left unset. For
python-glanceclient, this would be
"python-glanceclient"
:param pre_version: optional version that the project is working to
"""
self.package = package
if python_package is None:
self.python_package = package
else:
self.python_package = python_package
self.pre_version = pre_version
self.version = None
def _generate_version(self):
"""Defer to the openstack.common.setup routines for making a
version from git."""
if self.pre_version is None:
return setup.get_post_version(self.python_package)
else:
return setup.get_pre_version(self.python_package, self.pre_version)
def _newer_version(self, pending_version):
"""Check to see if we're working with a stale version or not.
We expect a version string that either looks like:
2012.2~f3~20120708.10.4426392
which is an unreleased version of a pre-version, or:
0.1.1.4.gcc9e28a
which is an unreleased version of a post-version, or:
0.1.1
Which is a release and which should match tag.
For now, if we have a date-embedded version, check to see if it's
old, and if so re-generate. Otherwise, just deal with it.
"""
try:
version_date = int(self.version.split("~")[-1].split('.')[0])
if version_date < int(datetime.date.today().strftime('%Y%m%d')):
return self._generate_version()
else:
return pending_version
except Exception:
return pending_version
def version_string_with_vcs(self, always=False):
"""Return the full version of the package including suffixes indicating
VCS status.
For instance, if we are working towards the 2012.2 release,
canonical_version_string should return 2012.2 if this is a final
release, or else something like 2012.2~f1~20120705.20 if it's not.
:param always: if true, skip all version caching
"""
if always:
self.version = self._generate_version()
if self.version is None:
requirement = pkg_resources.Requirement.parse(self.python_package)
versioninfo = "%s/versioninfo" % self.package
try:
raw_version = pkg_resources.resource_string(requirement,
versioninfo)
self.version = self._newer_version(raw_version.strip())
except (IOError, pkg_resources.DistributionNotFound):
self.version = self._generate_version()
return self.version
def canonical_version_string(self, always=False):
"""Return the simple version of the package excluding any suffixes.
For instance, if we are working towards the 2012.2 release,
canonical_version_string should return 2012.2 in all cases.
:param always: if true, skip all version caching
"""
return self.version_string_with_vcs(always).split('~')[0]
def version_string(self, always=False):
"""Return the base version of the package.
For instance, if we are working towards the 2012.2 release,
version_string should return 2012.2 if this is a final release, or
2012.2-dev if it is not.
:param always: if true, skip all version caching
"""
version_parts = self.version_string_with_vcs(always).split('~')
if len(version_parts) == 1:
return version_parts[0]
else:
return '%s-dev' % (version_parts[0],)
def deferred_version_string(self, prefix=""):
"""Generate an object which will expand in a string context to
the results of version_string(). We do this so that don't
call into pkg_resources every time we start up a program when
passing version information into the CONF constructor, but
rather only do the calculation when and if a version is requested
"""
return _deferred_version_string(self, prefix)
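# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module.  Typical use when
# exposing a package version; the package names are assumptions:
#     version_info = VersionInfo('gnocchiclient',
#                                python_package='python-gnocchiclient')
#     version_string = version_info.deferred_version_string(prefix='v')
#     # str(version_string) is computed lazily, e.g. 'v0.1.1'
# ---------------------------------------------------------------------------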
|
apache-2.0
| 2,134,061,684,529,715,200 | 1,983,090,922,235,288,600 | 38.763514 | 79 | 0.625319 | false |
UTSA-ICS/keystone-SID
|
keystone/tests/test_auth.py
|
1
|
44678
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import uuid
import mock
from keystone import assignment
from keystone import auth
from keystone.common import authorization
from keystone.common import environment
from keystone import config
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import default_fixtures
from keystone import token
from keystone import trust
CONF = config.CONF
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
HOST_URL = 'http://keystone:5001'
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None,
trust_id=None):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
if trust_id is not None:
auth_json['trust_id'] = trust_id
return auth_json
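# Example (added for illustration): a call such as
#   _build_user_auth(username='FOO', password='foo2', tenant_name='BAR')
# builds the v2 auth body
#   {'passwordCredentials': {'username': 'FOO', 'password': 'foo2'},
#    'tenantName': 'BAR'}
# which the tests below pass straight to token.controllers.Auth().authenticate().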
class AuthTest(tests.TestCase):
def setUp(self):
super(AuthTest, self).setUp()
self.load_backends()
self.load_fixtures(default_fixtures)
# need to register the token provider first because auth controller
# depends on it
token.provider.Manager()
self.context_with_remote_user = {'environment':
{'REMOTE_USER': 'FOO',
'AUTH_TYPE': 'Negotiate'}}
self.empty_context = {'environment': {}}
self.controller = token.controllers.Auth()
#This call sets up, among other things, the call to popen
#that will be used to run the CMS command. These tests were
#passing only due to the global nature of the call. If the
#tests in this file are run alone, API calls return unauthorized.
environment.use_eventlet(monkeypatch_thread=False)
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
token['access']['token']['id'] = 'dummy'
del token['access']['token']['expires']
del token['access']['token']['issued_at']
return token
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['expires']),
timeutils.parse_isotime(b['access']['token']['expires']))
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['issued_at']),
timeutils.parse_isotime(b['access']['token']['issued_at']))
return self.assertDictEqual(normalize(a), normalize(b))
class AuthBadRequests(AuthTest):
def setUp(self):
super(AuthBadRequests, self).setUp()
def test_no_external_auth(self):
"""Verify that _authenticate_external() raises exception if N/A."""
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
self.controller._authenticate_external,
{}, {})
def test_no_token_in_auth(self):
"""Verify that _authenticate_token() raises exception if no token."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_token,
None, {})
def test_no_credentials_in_auth(self):
"""Verify that _authenticate_local() raises exception if no creds."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_local,
None, {})
def test_authenticate_blank_request_body(self):
"""Verify sending empty json dict raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {})
def test_authenticate_blank_auth(self):
"""Verify sending blank 'auth' raises the right exception."""
body_dict = _build_user_auth()
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_invalid_auth_content(self):
"""Verify sending invalid 'auth' raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {'auth': 'abcd'})
def test_authenticate_user_id_too_large(self):
"""Verify sending large 'userId' raises the right exception."""
body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_username_too_large(self):
"""Verify sending large 'username' raises the right exception."""
body_dict = _build_user_auth(username='0' * 65, password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_id_too_large(self):
"""Verify sending large 'tenantId' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_id='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_name_too_large(self):
"""Verify sending large 'tenantName' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_token_too_large(self):
"""Verify sending large 'token' raises the right exception."""
body_dict = _build_user_auth(token={'id': '0' * 8193})
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_password_too_large(self):
"""Verify sending large 'password' raises the right exception."""
length = CONF.identity.max_password_length + 1
body_dict = _build_user_auth(username='FOO', password='0' * length)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
class AuthWithToken(AuthTest):
def setUp(self):
super(AuthWithToken, self).setUp()
def test_unscoped_token(self):
"""Verify getting an unscoped token with password creds."""
body_dict = _build_user_auth(username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
self.assertNotIn('tenant', unscoped_token['access']['token'])
def test_auth_invalid_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_bad_formatted_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={})
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_auth_unscoped_token_no_project(self):
"""Verify getting an unscoped token with an unscoped token."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate({}, body_dict)
self.assertEqualTokens(unscoped_token, unscoped_token_2)
def test_auth_unscoped_token_project(self):
"""Verify getting a token in a tenant with an unscoped token."""
# Add a role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
        # Get an unscoped token
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
        # Get a token on BAR tenant using the unscoped token
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"],
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertEqual(self.role_member['id'], roles[0])
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
# Add a v2 style role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Now create a group role for this user as well
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_admin['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
def test_auth_token_cross_domain_group_and_project(self):
"""Verify getting a token in cross domain group/project roles."""
# create domain, project and group and grant roles to user
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain1['id']}
self.assignment_api.create_project(project1['id'], project1)
role_foo_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_foo_domain1['id'],
role_foo_domain1)
role_group_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_group_domain1['id'],
role_group_domain1)
self.assignment_api.add_user_to_project(project1['id'],
self.user_foo['id'])
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
project_id=project1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=project1['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
domain_id=domain1['id'],
role_id=role_foo_domain1['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
domain_id=domain1['id'],
role_id=role_group_domain1['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(project1['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
self.assertNotIn(role_foo_domain1['id'], roles)
self.assertNotIn(role_group_domain1['id'], roles)
def test_belongs_to_no_tenant(self):
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=unscoped_token_id)
def test_belongs_to(self):
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
scoped_token_id = scoped_token['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'me'}),
token_id=scoped_token_id)
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=scoped_token_id)
def test_token_auth_with_binding(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth()
unscoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the token should have bind information in it
bind = unscoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
body_dict = _build_user_auth(
token=unscoped_token['access']['token'],
tenant_name='BAR')
# using unscoped token without remote user context fails
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.empty_context, body_dict)
# using token with remote user context succeeds
scoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the bind information should be carried over from the original token
bind = scoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
self.assignment_api.create_project(project1['id'], project1)
role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_one['id'], role_one)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project1['id'], role_one['id'])
no_context = {}
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
token = self.controller.authenticate(no_context, body_dict)
# Ensure it is valid
token_id = token['access']['token']['id']
self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Delete the role, which should invalidate the token
role_controller.delete_role(
dict(is_admin=True, query_string={}), role_one['id'])
# Check the token is now invalid
self.assertRaises(
exception.TokenNotFound,
self.controller.validate_token,
dict(is_admin=True, query_string={}),
token_id=token_id)
class AuthWithPasswordCredentials(AuthTest):
def setUp(self):
super(AuthWithPasswordCredentials, self).setUp()
def test_auth_invalid_user(self):
"""Verify exception is raised if invalid user."""
body_dict = _build_user_auth(
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_valid_user_invalid_password(self):
"""Verify exception is raised if invalid password."""
body_dict = _build_user_auth(
username="FOO",
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_empty_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(
username="FOO",
password="")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_no_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(username="FOO")
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_blank_password_credentials(self):
"""Sending empty dict as passwordCredentials raises a 400 error."""
body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_no_username(self):
"""Verify skipping username raises the right exception."""
body_dict = _build_user_auth(password="pass",
tenant_name="demo")
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_bind_without_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='BAR')
token = self.controller.authenticate({}, body_dict)
self.assertNotIn('bind', token['access']['token'])
def test_change_default_domain_id(self):
# If the default_domain_id config option is not the default then the
# user in auth data is from the new default domain.
# 1) Create a new domain.
new_domain_id = uuid.uuid4().hex
new_domain = {
'description': uuid.uuid4().hex,
'enabled': True,
'id': new_domain_id,
'name': uuid.uuid4().hex,
}
self.assignment_api.create_domain(new_domain_id, new_domain)
# 2) Create user "foo" in new domain with different password than
# default-domain foo.
new_user_id = uuid.uuid4().hex
new_user_password = uuid.uuid4().hex
new_user = {
'id': new_user_id,
'name': self.user_foo['name'],
'domain_id': new_domain_id,
'password': new_user_password,
'email': '[email protected]',
}
self.identity_api.create_user(new_user_id, new_user)
# 3) Update the default_domain_id config option to the new domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# 4) Authenticate as "foo" using the password in the new domain.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=new_user_password)
# The test is successful if this doesn't raise, so no need to assert.
self.controller.authenticate({}, body_dict)
class AuthWithRemoteUser(AuthTest):
def setUp(self):
super(AuthWithRemoteUser, self).setUp()
def test_unscoped_remote_authn(self):
"""Verify getting an unscoped token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth()
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_unscoped_remote_authn_jsonless(self):
"""Verify that external auth with invalid request fails."""
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{'REMOTE_USER': 'FOO'},
None)
def test_scoped_remote_authn(self):
"""Verify getting a token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name='BAR')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(
tenant_name='BAR')
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_nometa_remote_authn(self):
"""Verify getting a token with external authn and no metadata."""
body_dict = _build_user_auth(
username='TWO',
password='two2',
tenant_name='BAZ')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(tenant_name='BAZ')
remote_token = self.controller.authenticate(
{'environment': {'REMOTE_USER': 'TWO'}}, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_remote_authn_invalid_user(self):
"""Verify that external auth with invalid user fails."""
body_dict = _build_user_auth(tenant_name="BAR")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{'environment': {'REMOTE_USER': uuid.uuid4().hex}},
body_dict)
def test_bind_with_kerberos(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(tenant_name="BAR")
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
def test_bind_without_config_opt(self):
self.config_fixture.config(group='token', bind=['x509'])
body_dict = _build_user_auth(tenant_name='BAR')
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertNotIn('bind', token['access']['token'])
class AuthWithTrust(AuthTest):
def setUp(self):
super(AuthWithTrust, self).setUp()
trust.Manager()
self.trust_controller = trust.controllers.TrustV3()
self.auth_v3_controller = auth.controllers.Auth()
self.trustor = self.user_foo
self.trustee = self.user_two
self.assigned_roles = [self.role_member['id'],
self.role_browser['id']]
for assigned_role in self.assigned_roles:
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
self.sample_data = {'trustor_user_id': self.trustor['id'],
'trustee_user_id': self.trustee['id'],
'project_id': self.tenant_bar['id'],
'impersonation': True,
'roles': [{'id': self.role_browser['id']},
{'name': self.role_member['name']}]}
expires_at = timeutils.strtime(timeutils.utcnow() +
datetime.timedelta(minutes=10),
fmt=TIME_FORMAT)
self.create_trust(expires_at=expires_at)
def config_overrides(self):
super(AuthWithTrust, self).config_overrides()
self.config_fixture.config(group='trust', enabled=True)
def _create_auth_context(self, token_id):
token_ref = self.token_api.get_token(token_id)
auth_context = authorization.token_to_auth_context(
token_ref['token_data'])
return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
'token_id': token_id,
'host_url': HOST_URL}
def create_trust(self, expires_at=None, impersonation=True):
username = self.trustor['name']
password = 'foo2'
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
trust_data = copy.deepcopy(self.sample_data)
trust_data['expires_at'] = expires_at
trust_data['impersonation'] = impersonation
self.new_trust = self.trust_controller.create_trust(
context, trust=trust_data)['trust']
def build_v2_token_request(self, username, password):
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
unscoped_token_id = self.unscoped_token['access']['token']['id']
request_body = _build_user_auth(token={'id': unscoped_token_id},
trust_id=self.new_trust['id'],
tenant_id=self.tenant_bar['id'])
return request_body
def test_create_trust_bad_data_fails(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
bad_sample_data = {'trustor_user_id': self.trustor['id'],
'project_id': self.tenant_bar['id'],
'roles': [{'id': self.role_browser['id']}]}
self.assertRaises(exception.ValidationError,
self.trust_controller.create_trust,
context, trust=bad_sample_data)
def test_create_trust_no_roles(self):
context = {'token_id': self.unscoped_token['access']['token']['id']}
self.sample_data['roles'] = []
self.assertRaises(exception.Forbidden,
self.trust_controller.create_trust,
context, trust=self.sample_data)
def test_create_trust(self):
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
self.assertTrue(timeutils.parse_strtime(self.new_trust['expires_at'],
fmt=TIME_FORMAT))
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['links']['self'])
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['roles_links']['self'])
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_expires_bad(self):
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="bad")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="Z")
def test_get_trust(self):
context = {'token_id': self.unscoped_token['access']['token']['id'],
'host_url': HOST_URL}
trust = self.trust_controller.get_trust(context,
self.new_trust['id'])['trust']
self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_no_impersonation(self):
self.create_trust(expires_at=None, impersonation=False)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], False)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustee_user_id'])
# TODO(ayoung): Endpoints
def test_create_trust_impersonation(self):
self.create_trust(expires_at=None)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], True)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustor_user_id'])
def test_token_from_trust_wrong_user_fails(self):
request_body = self.build_v2_token_request('FOO', 'foo2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def fetch_v2_token_from_trust(self):
request_body = self.build_v2_token_request('TWO', 'two2')
auth_response = self.controller.authenticate({}, request_body)
return auth_response
def fetch_v3_token_from_trust(self):
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"id": self.trustee["id"],
"password": self.trustee["password"]}}
},
'scope': {
'project': {
'id': self.tenant_baz['id']}}}
auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_password_data))
token = auth_response.headers['X-Subject-Token']
v3_req_with_trust = {
"identity": {
"methods": ["token"],
"token": {"id": token}},
"scope": {
"OS-TRUST:trust": {"id": self.new_trust['id']}}}
token_auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_req_with_trust))
return token_auth_response
def test_create_v3_token_from_trust(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token_user = auth_response.json['token']['user']
self.assertEqual(self.trustor['id'], trust_token_user['id'])
trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
self.assertEqual(trust_token_trust['id'], self.new_trust['id'])
self.assertEqual(self.trustor['id'],
trust_token_trust['trustor_user']['id'])
self.assertEqual(self.trustee['id'],
trust_token_trust['trustee_user']['id'])
trust_token_roles = auth_response.json['token']['roles']
self.assertEqual(2, len(trust_token_roles))
def test_v3_trust_token_get_token_fails(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token = auth_response.headers['X-Subject-Token']
v3_token_data = {'identity': {
'methods': ['token'],
'token': {'id': trust_token}
}}
self.assertRaises(
exception.Forbidden,
self.auth_v3_controller.authenticate_for_token,
{'environment': {},
'query_string': {}}, v3_token_data)
def test_token_from_trust(self):
auth_response = self.fetch_v2_token_from_trust()
self.assertIsNotNone(auth_response)
self.assertEqual(2,
len(auth_response['access']['metadata']['roles']),
"user_foo has three roles, but the token should"
" only get the two roles specified in the trust.")
def assert_token_count_for_trust(self, expected_value):
tokens = self.trust_controller.token_api._list_tokens(
self.trustee['id'], trust_id=self.new_trust['id'])
token_count = len(tokens)
self.assertEqual(expected_value, token_count)
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
self.assert_token_count_for_trust(0)
self.fetch_v2_token_from_trust()
self.assert_token_count_for_trust(1)
self.token_api.delete_tokens_for_user(self.trustee['id'])
self.assert_token_count_for_trust(0)
def test_token_from_trust_cant_get_another_token(self):
auth_response = self.fetch_v2_token_from_trust()
trust_token_id = auth_response['access']['token']['id']
request_body = _build_user_auth(token={'id': trust_token_id},
tenant_id=self.tenant_bar['id'])
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_delete_trust_revokes_token(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
self.fetch_v2_token_from_trust()
trust_id = self.new_trust['id']
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(1, len(tokens))
self.trust_controller.delete_trust(context, trust_id=trust_id)
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(0, len(tokens))
def test_token_from_trust_with_no_role_fails(self):
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_expired_trust_get_token_fails(self):
expiry = "1999-02-18T10:10:00Z"
self.create_trust(expiry)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_token_from_trust_with_wrong_role_fails(self):
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'],
self.tenant_bar['id'],
self.role_other['id'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
class TokenExpirationTest(AuthTest):
@mock.patch.object(timeutils, 'utcnow')
def _maintain_token_expiration(self, mock_utcnow):
"""Token expiration should be maintained after re-auth & validation."""
now = datetime.datetime.utcnow()
mock_utcnow.return_value = now
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
original_expiration = r['access']['token']['expires']
mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=unscoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
r = self.controller.authenticate(
{},
auth={
'token': {
'id': unscoped_token_id,
},
'tenantId': self.tenant_bar['id'],
})
scoped_token_id = r['access']['token']['id']
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=scoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
def test_maintain_uuid_token_expiration(self):
self.config_fixture.config(group='signing', token_format='UUID')
self._maintain_token_expiration()
class AuthCatalog(tests.SQLDriverOverrides, AuthTest):
"""Tests for the catalog provided in the auth response."""
def config_files(self):
config_files = super(AuthCatalog, self).config_files()
# We need to use a backend that supports disabled endpoints, like the
# SQL backend.
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def _create_endpoints(self):
def create_endpoint(service_id, region, **kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'interface': 'public',
'region': region,
'service_id': service_id,
'url': 'http://localhost/%s' % uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_endpoint(id_, ref)
return ref
# Create a service for use with the endpoints.
def create_service(**kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_service(id_, ref)
return ref
enabled_service_ref = create_service(enabled=True)
disabled_service_ref = create_service(enabled=False)
region = uuid.uuid4().hex
# Create endpoints
enabled_endpoint_ref = create_endpoint(
enabled_service_ref['id'], region)
create_endpoint(
enabled_service_ref['id'], region, enabled=False,
interface='internal')
create_endpoint(
disabled_service_ref['id'], region)
return enabled_endpoint_ref
def test_auth_catalog_disabled_endpoint(self):
"""On authenticate, get a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
def test_validate_catalog_disabled_endpoint(self):
"""On validate, get back a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Validate
token_id = token['access']['token']['id']
validate_ref = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
class NonDefaultAuthTest(tests.TestCase):
def test_add_non_default_auth_method(self):
self.config_fixture.config(group='auth',
methods=['password', 'token', 'custom'])
config.setup_authentication()
self.assertTrue(hasattr(CONF.auth, 'custom'))
|
apache-2.0
| 4,026,540,406,839,683,000 | -1,989,905,576,747,878,100 | 39.839122 | 79 | 0.575406 | false |
olysonek/tuned
|
tests/unit/profiles/test_profile.py
|
1
|
1691
|
import unittest2
import tuned.profiles
import collections
class MockProfile(tuned.profiles.profile.Profile):
def _create_unit(self, name, config):
return (name, config)
class ProfileTestCase(unittest2.TestCase):
def test_init(self):
MockProfile("test", {})
def test_create_units(self):
profile = MockProfile("test", {
"main": { "anything": 10 },
"network" : { "type": "net", "devices": "*" },
"storage" : { "type": "disk" },
})
self.assertIs(type(profile.units), collections.OrderedDict)
self.assertEqual(len(profile.units), 2)
self.assertListEqual(sorted([name_config for name_config in profile.units]), sorted(["network", "storage"]))
def test_create_units_empty(self):
profile = MockProfile("test", {"main":{}})
self.assertIs(type(profile.units), collections.OrderedDict)
self.assertEqual(len(profile.units), 0)
def test_sets_name(self):
profile1 = MockProfile("test_one", {})
profile2 = MockProfile("test_two", {})
self.assertEqual(profile1.name, "test_one")
self.assertEqual(profile2.name, "test_two")
def test_change_name(self):
profile = MockProfile("oldname", {})
self.assertEqual(profile.name, "oldname")
profile.name = "newname"
self.assertEqual(profile.name, "newname")
def test_sets_options(self):
profile = MockProfile("test", {
"main": { "anything": 10 },
"network" : { "type": "net", "devices": "*" },
})
self.assertIs(type(profile.options), dict)
self.assertEqual(profile.options["anything"], 10)
def test_sets_options_empty(self):
profile = MockProfile("test", {
"storage" : { "type": "disk" },
})
self.assertIs(type(profile.options), dict)
self.assertEqual(len(profile.options), 0)
|
gpl-2.0
| 5,307,427,957,775,375,000 | 6,619,492,738,336,257,000 | 28.155172 | 110 | 0.678297 | false |
AnselCmy/ARPS
|
report_crawler/report_crawler/spiders/spiders_001/_B/BNU001.py
|
2
|
1386
|
# -*- coding:utf-8 -*-
import scrapy
from report_crawler.spiders.__Global_function import get_localtime
from report_crawler.spiders.__Global_variable import now_time, end_time
class BNU001_Spider(scrapy.Spider):
name = 'BNU001'
start_urls = ['http://cist.bnu.edu.cn/tzgg/index.html']
domain = 'http://cist.bnu.edu.cn/tzgg/'
counts = 0
def parse(self, response):
messages = response.xpath("//div[@class='twelve columns alpha']/ul/li")
for i, message in enumerate(messages):
report_name = message.xpath(".//a/@title").extract()[0]
if u"【预告】" not in report_name or u"论坛" in report_name:
continue
report_time = get_localtime(message.xpath("span/text()").extract()[0])
if report_time > end_time:
continue
if report_time < now_time:
return
report_url = self.domain + message.xpath(".//a/@href").extract()[0]
yield scrapy.Request(report_url, callback=self.parse_pages,
meta={'link': report_url, 'number': i + 1, 'publication': report_time})
def parse_pages(self, response):
messages = response.xpath("//div[@class='heading']")
return {'text': messages, 'number': response.meta['number'], 'organizer': u"北京师范大学信息科学与技术学院",
'faculty': self.name, 'link': response.meta['link'], 'publication': response.meta['publication'],
'location': u"华北:北京市"}
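# Translation note (comments added; the string literals above are kept verbatim
# because the spider matches on them): u"【预告】" means "[announcement]",
# u"论坛" means "forum", the organizer string is "School of Information Science
# and Technology, Beijing Normal University", and the location string is
# "North China: Beijing".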
|
mit
| 548,688,824,417,940,400 | -3,371,405,582,327,176,000 | 36.055556 | 107 | 0.65967 | false |
atgreen/bitcoin
|
qa/rpc-tests/smartfees.py
|
131
|
12419
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many many transactions without needing to spend
# time signing.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP"
SCRIPT_SIG = ["0451025175", "0451025275"]
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
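# Example (added): satoshi_round truncates to 8 decimal places, e.g.
# satoshi_round("0.123456789") == Decimal("0.12345678") and
# satoshi_round(1) == Decimal("1.00000000").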
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
'''
Create and send a transaction with a random fee.
    The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)
'''
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
inputs = []
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
outputs = {}
outputs[P2SH_1] = total_in - amount - fee
outputs[P2SH_2] = amount
rawtx = from_node.createrawtransaction(inputs, outputs)
    # Createrawtransaction constructs a transaction that is ready to be signed
# These transactions don't need to be signed, but we still have to insert the ScriptSig
# that will satisfy the ScriptPubKey.
completetx = rawtx[0:10]
inputnum = 0
for inp in inputs:
completetx += rawtx[10+82*inputnum:82+82*inputnum]
completetx += SCRIPT_SIG[inp["vout"]]
completetx += rawtx[84+82*inputnum:92+82*inputnum]
inputnum += 1
completetx += rawtx[10+82*inputnum:]
txid = from_node.sendrawtransaction(completetx, True)
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
return (completetx, fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
'''
We need to generate a lot of very small inputs so we can generate a ton of transactions
and they will have low priority.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
'''
prevtxout = txins.pop()
inputs = []
outputs = {}
inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] })
half_change = satoshi_round(prevtxout["amount"]/2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
outputs[P2SH_1] = half_change
outputs[P2SH_2] = rem_change
rawtx = from_node.createrawtransaction(inputs, outputs)
# If this is the initial split we actually need to sign the transaction
    # Otherwise we just need to insert the proper ScriptSig
if (initial_split) :
completetx = from_node.signrawtransaction(rawtx)["hex"]
else :
completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:]
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
'''
This function calls estimatefee and verifies that the estimates
meet certain invariants.
'''
all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
if print_estimates:
print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
delta = 1.0e-6 # account for rounding error
last_e = max(fees_seen)
for e in filter(lambda x: x >= 0, all_estimates):
# Estimates should be within the bounds of what transactions fees actually were:
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
%(float(e), min(fees_seen), max(fees_seen)))
# Estimates should be monotonically decreasing
if float(e)-delta > last_e:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
%(float(e),float(last_e)))
last_e = e
valid_estimate = False
invalid_estimates = 0
for e in all_estimates:
if e >= 0:
valid_estimate = True
else:
invalid_estimates += 1
# Once we're at a high enough confirmation count that we can give an estimate
# We should have estimates for all higher confirmation counts
if valid_estimate and e < 0:
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
return all_estimates
class EstimateFeeTest(BitcoinTestFramework):
def setup_network(self):
'''
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of small low priority outputs
which we will use to generate our transactions.
'''
self.nodes = []
# Use node0 to mine blocks for input splitting
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
"-relaypriority=0", "-whitelist=127.0.0.1"]))
print("This test is time consuming, please be patient")
print("Splitting inputs to small size so we can generate low priority tx's")
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
#Double txouts to txouts2
while (len(self.txouts)>0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
#Double txouts2 to txouts
while (len(self.txouts2)>0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
print("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
# Node1 mines small blocks but that are bigger than the expected transaction rate,
# and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# (17k is room enough for 110 or so transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=18000",
"-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 70 or so transactions)
node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.is_network_split = False
self.sync_all()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex)/2)/1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3],.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3],.1)
#update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
print("Checking estimates for 1/2/3/6/15/25 blocks")
print("Creating transactions and mining them with a huge block size")
        # Create transactions and mine 30 big blocks with node 0 such that the mempool is always emptied
self.transact_and_mine(30, self.nodes[0])
check_estimates(self.nodes[1], self.fees_per_kb, 1)
print("Creating transactions and mining them with a block size that can't keep up")
        # Create transactions and mine 20 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(20, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 3)
print("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 40 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(40, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3],.1)
print("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
|
mit
| 2,855,994,109,217,559,000 | -8,582,041,303,988,211,000 | 47.135659 | 110 | 0.635961 | false |
ddelemeny/calligra
|
3rdparty/google-breakpad/src/tools/gyp/test/lib/TestCmd.py
|
330
|
52544
|
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
             stdin = 'input to feed to the program\n',
             universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
    import difflib
except ImportError:
    pass
else:
    __all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile('\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
# TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
        output like the simple, unadorned 'diff' command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
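# Illustrative sketch (not part of the original module): what simple_diff()
# prints for two small line lists, assuming difflib was importable above.
# The variable names and data below are made up for the example.
#
#     expect = ['hello', 'world']
#     actual = ['hello', 'there', 'world']
#     print '\n'.join(simple_diff(expect, actual))
#     # 1a2
#     # > there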
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
    you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
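# Illustrative sketch (not part of the original module): diff_re() treats each
# expected line as an anchored regular expression; the data below is made up.
#
#     expect = ['abc', 'd[0-9]+f']
#     actual = ['abc', 'dxf']
#     print '\n'.join(diff_re(expect, actual))
#     # 2c2
#     # < 'd[0-9]+f'
#     # > 'dxf'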
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
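# Illustrative sketch (not part of the original module): where_is() mirrors the
# shell "which" command on both platforms; the file name below is only an
# example and the result depends on the local PATH.
#
#     where_is('python')    # e.g. '/usr/bin/python', or None if not found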
try:
import subprocess
except ImportError:
# The subprocess module doesn't exist in this version of Python,
# so we're going to cobble up something that looks just enough
# like its API for our purposes below.
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
# A cribbed Popen4 class, with some retrofitted code from
# the Python 1.5 Popen3 class methods to do certain things
# by hand.
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
#if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
#if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
# TODO(3.0): rewrite to use memoryview()
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
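# Illustrative sketch (not part of the original module): recv_some() and
# send_all() are meant to be used with the Popen subclass defined above,
# roughly like this on a POSIX system (the command is only an example):
#
#     p = Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#     send_all(p, 'hello\n')
#     print recv_some(p)    # 'hello\n', timing permitting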
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
#self.diff_function = difflib.context_diff
#self.diff_function = difflib.unified_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
        if os.environ.has_key('PRESERVE') and os.environ['PRESERVE'] != '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
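    # Illustrative sketch (not part of the original class): command_args()
    # builds the argv list that start()/run() execute; the names and the
    # resulting absolute path below are made up.
    #
    #     test = TestCmd(program='myprog', interpreter='python', workdir='')
    #     test.command_args(arguments='--verbose input.txt')
    #     # => ['python', '/original/cwd/myprog', '--verbose', 'input.txt']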
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
        if conditions == ():
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if not p.stderr is None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
#
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
if (path != None):
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# read permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off read
            # permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# execute permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
| -2,039,543,507,259,026,000 | 5,010,896,834,675,967,000 | 31.901691 | 101 | 0.551005 | false |
ShassAro/ShassAro
|
DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/pip/download.py
|
328
|
22580
|
import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([
_implementation_version,
sys.pypy_version_info.releaselevel,
])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
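# Illustrative sketch (not part of the original module): a typical value
# returned by user_agent(); the version numbers shown are made up.
#
#     user_agent()
#     # => 'pip/1.5.4 CPython/2.7.6 Linux/3.13.0-24-generic'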
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.split("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
        # We are not able to prompt the user, so simply return the response
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
# Prompt the user for a new username and password
username = raw_input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
class LocalFSResponse(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def __getattr__(self, name):
return getattr(self.fileobj, name)
def read(self, amt=None, decode_content=None, cache_content=False):
return self.fileobj.read(amt)
# Insert Hacks to Make Cookie Jar work w/ Requests
@property
def _original_response(self):
class FakeMessage(object):
def getheaders(self, header):
return []
def get_all(self, header, default):
return []
class FakeResponse(object):
@property
def msg(self):
return FakeMessage()
return FakeResponse()
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
parsed_url = urlparse.urlparse(request.url)
# We only work for requests with a host of localhost
if parsed_url.netloc.lower() != "localhost":
raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
request.url)
real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
pathname = url_to_path(real_url)
resp = Response()
resp.status_code = 200
resp.url = real_url
stats = os.stat(pathname)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
resp.headers = CaseInsensitiveDict({
"Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = LocalFSResponse(open(pathname, "rb"))
resp.close = resp.raw.close
return resp
def close(self):
pass
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
def request(self, method, url, *args, **kwargs):
# Make file:// urls not fail due to lack of a hostname
parsed = urlparse.urlparse(url)
if parsed.scheme == "file":
url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
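# Illustrative sketch (not part of the original module): path_to_url() and
# url_to_path() are intended to round-trip; the POSIX path below is made up.
#
#     path_to_url('/tmp/pkg dir/foo.tar.gz')
#     # => 'file:///tmp/pkg%20dir/foo.tar.gz'
#     url_to_path('file:///tmp/pkg%20dir/foo.tar.gz')
#     # => '/tmp/pkg dir/foo.tar.gz'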
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
'.whl')
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
% (download_hash.digest_size, link, link.hash_name))
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash.hexdigest(), link.hash))
raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
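# Illustrative sketch (not part of the original module): how the two helpers
# above combine when a link URL carries a fragment such as #md5=<digest>;
# the file name is made up and `link` stands for a pip Link object.
#
#     download_hash = _get_hash_from_file('/tmp/foo.tar.gz', link)
#     if download_hash is not None:
#         _check_hash(download_hash, link)    # raises HashMismatch on failure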
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
def resp_read(chunk_size):
try:
# Special case for urllib3.
try:
for chunk in resp.raw.stream(
chunk_size, decode_content=False):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
for chunk in resp_read(4096):
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded)))
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn('Backing up %s to %s'
% (display_path(download_location), display_path(dest_file)))
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
session=None):
if session is None:
session = PipSession()
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
# If a download cache is specified, is the file cached there?
if download_cache:
cache_file = os.path.join(download_cache,
urllib.quote(target_url, ''))
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
# If a download dir is specified, is the file already downloaded there?
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
    # If already downloaded, does its hash match?
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# If not a valid download, let's confirm the cached file is valid
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
# let's download to a tmp dir
if not temp_location:
try:
resp = session.get(target_url, stream=True)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.fatal("HTTP error %s while getting %s" %
(exc.response.status_code, link))
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(temp_location, location, content_type, link)
# if using a download cache, cache it, if needed
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
link_path = url_to_path(link.url_without_fragment)
already_downloaded = False
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
if download_dir:
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
content_type = mimetypes.guess_type(download_path)[0]
logger.notify('File was already downloaded %s' % download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
already_downloaded = True
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % link_path
)
os.unlink(download_path)
else:
already_downloaded = True
if already_downloaded:
from_path = download_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded:
_copy_file(from_path, download_dir, content_type, link)
|
gpl-2.0
| -4,566,514,387,347,521,000 | 505,368,244,023,518,850 | 34.062112 | 114 | 0.587467 | false |
JerryLX/DPDK
|
tools/dpdk-devbind.py
|
2
|
20921
|
#! /usr/bin/python
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
# The PCI base class for NETWORK devices
NETWORK_BASE_CLASS = "02"
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]
# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def usage():
'''Print usage information for the program'''
argv0 = basename(sys.argv[0])
print("""
Usage:
------
%(argv0)s [options] DEVICE1 DEVICE2 ....
where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
Options:
--help, --usage:
Display usage information and quit
-s, --status:
Print the current status of all known network interfaces.
For each device, it displays the PCI domain, bus, slot and function,
along with a text description of the device. Depending upon whether the
device is being used by a kernel driver, the igb_uio driver, or no
driver, other relevant information will be displayed:
* the Linux interface name e.g. if=eth0
* the driver being used e.g. drv=igb_uio
* any suitable drivers not currently using that device
e.g. unused=igb_uio
NOTE: if this flag is passed along with a bind/unbind option, the
status display will always occur after the other operations have taken
place.
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
-u, --unbind:
Unbind a device (Equivalent to \"-b none\")
--force:
By default, devices which are used by Linux - as indicated by having
routes in the routing table - cannot be modified. Using the --force
flag overrides this behavior, allowing active links to be forcibly
unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
Examples:
---------
To display current device status:
%(argv0)s --status
To bind eth1 from the current driver and move to use igb_uio
%(argv0)s --bind=igb_uio eth1
To unbind 0000:01:00.0 from using any driver
%(argv0)s -u 0000:01:00.0
To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
%(argv0)s -b ixgbe 02:00.0 02:00.1
""" % locals()) # replace items from local variables
# This is roughly compatible with check_output function in subprocess module
# which is only available in python 2.7.
def check_output(args, stderr=None):
'''Run a command and capture its output'''
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=stderr).communicate()[0]
def find_module(mod):
'''find the .ko file for kernel module named mod.
Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
modules directory and finally under the parent directory of
the script '''
# check $RTE_SDK/$RTE_TARGET directory
if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],
os.environ['RTE_TARGET'], mod)
if exists(path):
return path
# check using depmod
try:
depmod_out = check_output(["modinfo", "-n", mod],
stderr=subprocess.STDOUT).lower()
if "error" not in depmod_out:
path = depmod_out.strip()
if exists(path):
return path
except: # if modinfo can't find module, it fails, so continue
pass
# check for a copy based off current path
tools_dir = dirname(abspath(sys.argv[0]))
if (tools_dir.endswith("tools")):
base_dir = dirname(tools_dir)
find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
if len(find_out) > 0: # something matched
path = find_out.splitlines()[0]
if exists(path):
return path
def check_modules():
'''Checks that igb_uio is loaded'''
global dpdk_drivers
# list of supported modules
mods = [{"Name": driver, "Found": False} for driver in dpdk_drivers]
# first check if module is loaded
try:
# Get list of sysfs modules (both built-in and dynamically loaded)
sysfs_path = '/sys/module/'
# Get the list of directories in sysfs_path
sysfs_mods = [os.path.join(sysfs_path, o) for o
in os.listdir(sysfs_path)
if os.path.isdir(os.path.join(sysfs_path, o))]
# Extract the last element of '/sys/module/abc' in the array
sysfs_mods = [a.split('/')[-1] for a in sysfs_mods]
# special case for vfio_pci (module is named vfio-pci,
# but its .ko is named vfio_pci)
sysfs_mods = map(lambda a:
a if a != 'vfio_pci' else 'vfio-pci', sysfs_mods)
for mod in mods:
if mod["Name"] in sysfs_mods:
mod["Found"] = True
except:
pass
# check if we have at least one loaded module
if True not in [mod["Found"] for mod in mods] and b_flag is not None:
if b_flag in dpdk_drivers:
print("Error - no supported modules(DPDK driver) are loaded")
sys.exit(1)
else:
print("Warning - no supported modules(DPDK driver) are loaded")
# change DPDK driver list to only contain drivers that are loaded
dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
def has_driver(dev_id):
'''return true if a device is assigned to a driver. False otherwise'''
return "Driver_str" in devices[dev_id]
def get_pci_device_details(dev_id):
'''This function gets additional details for a PCI device'''
device = {}
extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
# parse lspci details
for line in extra_info:
if len(line) == 0:
continue
name, value = line.decode().split("\t", 1)
name = name.strip(":") + "_str"
device[name] = value
# check for a unix interface name
device["Interface"] = ""
for base, dirs, _ in os.walk("/sys/bus/pci/devices/%s/" % dev_id):
if "net" in dirs:
device["Interface"] = \
",".join(os.listdir(os.path.join(base, "net")))
break
# check if a port is used for ssh connection
device["Ssh_if"] = False
device["Active"] = ""
return device
def get_nic_details():
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
dictionaries - one for each NIC.'''
global devices
global dpdk_drivers
# clear any old data
devices = {}
# first loop through and read details for all devices
# request machine readable format, with numeric IDs
dev = {}
dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
for dev_line in dev_lines:
if (len(dev_line) == 0):
if dev["Class"][0:2] == NETWORK_BASE_CLASS:
# convert device and vendor ids to numbers, then add to global
dev["Vendor"] = int(dev["Vendor"], 16)
dev["Device"] = int(dev["Device"], 16)
# use dict to make copy of dev
devices[dev["Slot"]] = dict(dev)
else:
name, value = dev_line.decode().split("\t", 1)
dev[name.rstrip(":")] = value
    # check which interface, if any, is used for an ssh connection to
    # this host, so we can mark it later.
ssh_if = []
route = check_output(["ip", "-o", "route"])
# filter out all lines for 169.254 routes
route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
route.decode().splitlines()))
rt_info = route.split()
for i in range(len(rt_info) - 1):
if rt_info[i] == "dev":
ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
# get additional info and add it to existing data
devices[d] = devices[d].copy()
devices[d].update(get_pci_device_details(d).items())
for _if in ssh_if:
if _if in devices[d]["Interface"].split(","):
devices[d]["Ssh_if"] = True
devices[d]["Active"] = "*Active*"
break
# add igb_uio to list of supporting modules if needed
if "Module_str" in devices[d]:
for driver in dpdk_drivers:
if driver not in devices[d]["Module_str"]:
devices[d]["Module_str"] = \
devices[d]["Module_str"] + ",%s" % driver
else:
devices[d]["Module_str"] = ",".join(dpdk_drivers)
# make sure the driver and module strings do not have any duplicates
if has_driver(d):
modules = devices[d]["Module_str"].split(",")
if devices[d]["Driver_str"] in modules:
modules.remove(devices[d]["Driver_str"])
devices[d]["Module_str"] = ",".join(modules)
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
it, which can then be used to index into the devices array'''
# check if it's already a suitable index
if dev_name in devices:
return dev_name
# check if it's an index just missing the domain part
elif "0000:" + dev_name in devices:
return "0000:" + dev_name
else:
# check if it's an interface name, e.g. eth1
for d in devices.keys():
if dev_name in devices[d]["Interface"].split(","):
return devices[d]["Slot"]
# if nothing else matches - error
print("Unknown device: %s. "
"Please specify device in \"bus:slot.func\" format" % dev_name)
sys.exit(1)
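# Illustrative sketch (hypothetical addresses and interface name): once
# get_nic_details() has populated "devices", all of the following forms
# resolve to the same canonical domain:bus:slot.func key.
def _dev_id_examples():
    return [dev_id_from_dev_name("0000:04:00.1"),  # full domain:bus:slot.func
            dev_id_from_dev_name("04:00.1"),       # domain part omitted
            dev_id_from_dev_name("eth1")]          # kernel interface name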
def unbind_one(dev_id, force):
'''Unbind the device identified by "dev_id" from its current driver'''
dev = devices[dev_id]
if not has_driver(dev_id):
print("%s %s %s is not currently managed by any driver\n" %
(dev["Slot"], dev["Device_str"], dev["Interface"]))
return
# prevent us disconnecting ourselves
if dev["Ssh_if"] and not force:
print("Routing table indicates that interface %s is active. "
"Skipping unbind" % (dev_id))
return
# write to /sys to unbind
filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
try:
f = open(filename, "a")
except:
print("Error: unbind failed for %s - Cannot open %s"
% (dev_id, filename))
sys.exit(1)
f.write(dev_id)
f.close()
def bind_one(dev_id, driver, force):
'''Bind the device given by "dev_id" to the driver "driver". If the device
is already bound to a different driver, it will be unbound first'''
dev = devices[dev_id]
saved_driver = None # used to rollback any unbind in case of failure
# prevent disconnection of our ssh session
if dev["Ssh_if"] and not force:
print("Routing table indicates that interface %s is active. "
"Not modifying" % (dev_id))
return
# unbind any existing drivers we don't want
if has_driver(dev_id):
if dev["Driver_str"] == driver:
print("%s already bound to driver %s, skipping\n"
% (dev_id, driver))
return
else:
saved_driver = dev["Driver_str"]
unbind_one(dev_id, force)
dev["Driver_str"] = "" # clear driver string
# if we are binding to one of DPDK drivers, add PCI id's to that driver
if driver in dpdk_drivers:
filename = "/sys/bus/pci/drivers/%s/new_id" % driver
try:
f = open(filename, "w")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename))
return
try:
f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
f.close()
except:
print("Error: bind failed for %s - Cannot write new PCI ID to "
"driver %s" % (dev_id, driver))
return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
try:
f = open(filename, "a")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename))
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
try:
f.write(dev_id)
f.close()
except:
        # for some reason, closing the file after writing dev_id when a new
        # PCI ID was just added to new_id results in IOError. However, if the
        # device was successfully bound, we don't care about the error and
        # can safely ignore it
tmp = get_pci_device_details(dev_id)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
print("Error: bind failed for %s - Cannot bind to driver %s"
% (dev_id, driver))
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
dev_list = map(dev_id_from_dev_name, dev_list)
for d in dev_list:
unbind_one(d, force)
def bind_all(dev_list, driver, force=False):
"""Bind method, takes a list of device locations"""
global devices
    dev_list = list(map(dev_id_from_dev_name, dev_list))
for d in dev_list:
bind_one(d, driver, force)
# when binding devices to a generic driver (i.e. one that doesn't have a
# PCI ID table), some devices that are not bound to any other driver could
# be bound even if no one has asked them to. hence, we check the list of
# drivers again, and see if some of the previously-unbound devices were
# erroneously bound.
for d in devices.keys():
# skip devices that were already bound or that we know should be bound
if "Driver_str" in devices[d] or d in dev_list:
continue
# update information about this device
        devices[d] = devices[d].copy()
        devices[d].update(get_pci_device_details(d).items())
# check if updated information indicates that the device was bound
if "Driver_str" in devices[d]:
unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
'''Displays to the user the details of a list of devices given in
"dev_list". The "extra_params" parameter, if given, should contain a string
with %()s fields in it for replacement by the named fields in each
device's dictionary.'''
strings = [] # this holds the strings to print. We sort before printing
print("\n%s" % title)
print("="*len(title))
if len(dev_list) == 0:
strings.append("<none>")
else:
for dev in dev_list:
if extra_params is not None:
strings.append("%s '%s' %s" % (dev["Slot"],
dev["Device_str"], extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
strings.sort()
print("\n".join(strings)) # print one per line
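# Minimal usage sketch (the device dictionary below is made up):
# "extra_params" is a %()s-style format string filled from each device's
# dictionary, exactly as show_status() does below.
def _display_devices_example():
    fake_dev = {"Slot": "0000:02:00.0", "Device_str": "Ethernet Controller",
                "Driver_str": "igb_uio", "Module_str": "vfio-pci"}
    display_devices("Example devices", [fake_dev],
                    "drv=%(Driver_str)s unused=%(Module_str)s")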
def show_status():
'''Function called when the script is passed the "--status" option.
Displays to the user what devices are bound to the igb_uio driver, the
kernel driver or to no driver'''
global dpdk_drivers
kernel_drv = []
dpdk_drv = []
no_drv = []
# split our list of devices into the three categories above
for d in devices.keys():
if not has_driver(d):
no_drv.append(devices[d])
continue
if devices[d]["Driver_str"] in dpdk_drivers:
dpdk_drv.append(devices[d])
else:
kernel_drv.append(devices[d])
# print each category separately, so we can clearly see what's used by DPDK
display_devices("Network devices using DPDK-compatible driver", dpdk_drv,
"drv=%(Driver_str)s unused=%(Module_str)s")
display_devices("Network devices using kernel driver", kernel_drv,
"if=%(Interface)s drv=%(Driver_str)s "
"unused=%(Module_str)s %(Active)s")
display_devices("Other network devices", no_drv, "unused=%(Module_str)s")
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
global b_flag
global status_flag
global force_flag
global args
if len(sys.argv) <= 1:
usage()
sys.exit(0)
try:
opts, args = getopt.getopt(sys.argv[1:], "b:us",
["help", "usage", "status", "force",
"bind=", "unbind"])
except getopt.GetoptError as error:
print(str(error))
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
for opt, arg in opts:
if opt == "--help" or opt == "--usage":
usage()
sys.exit(0)
if opt == "--status" or opt == "-s":
status_flag = True
if opt == "--force":
force_flag = True
if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
if b_flag is not None:
print("Error - Only one bind or unbind may be specified\n")
sys.exit(1)
if opt == "-u" or opt == "--unbind":
b_flag = "none"
else:
b_flag = arg
def do_arg_actions():
'''do the actual action requested by the user'''
global b_flag
global status_flag
global force_flag
global args
if b_flag is None and not status_flag:
        print("Error: No action specified for devices. "
              "Please give a -b or -u option")
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
if b_flag is not None and len(args) == 0:
print("Error: No devices specified.")
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
if b_flag == "none" or b_flag == "None":
unbind_all(args, force_flag)
elif b_flag is not None:
bind_all(args, b_flag, force_flag)
if status_flag:
if b_flag is not None:
get_nic_details() # refresh if we have changed anything
show_status()
def main():
'''program main function'''
parse_args()
check_modules()
get_nic_details()
do_arg_actions()
if __name__ == "__main__":
main()
|
gpl-2.0
| 6,224,372,971,309,642,000 | -5,807,711,799,847,311,000 | 35.258232 | 79 | 0.597438 | false |
blacklin/kbengine
|
kbe/src/lib/python/Lib/test/test_parser.py
|
113
|
26114
|
import parser
import unittest
import sys
import operator
import struct
from test import support
from test.script_helper import assert_python_failure
#
# First, we test that we can generate trees from valid source fragments,
# and that these valid trees are indeed allowed by the tree-loading side
# of the parser module.
#
class RoundtripLegalSyntaxTestCase(unittest.TestCase):
def roundtrip(self, f, s):
st1 = f(s)
t = st1.totuple()
try:
st2 = parser.sequence2st(t)
except parser.ParserError as why:
self.fail("could not roundtrip %r: %s" % (s, why))
self.assertEqual(t, st2.totuple(),
"could not re-generate syntax tree")
def check_expr(self, s):
self.roundtrip(parser.expr, s)
def test_flags_passed(self):
        # The unicode_literals flag has to be passed from the parser to AST
        # generation.
suite = parser.suite("from __future__ import unicode_literals; x = ''")
code = suite.compile()
scope = {}
exec(code, {}, scope)
self.assertIsInstance(scope["x"], str)
def check_suite(self, s):
self.roundtrip(parser.suite, s)
def test_yield_statement(self):
self.check_suite("def f(): yield 1")
self.check_suite("def f(): yield")
self.check_suite("def f(): x += yield")
self.check_suite("def f(): x = yield 1")
self.check_suite("def f(): x = y = yield 1")
self.check_suite("def f(): x = yield")
self.check_suite("def f(): x = y = yield")
self.check_suite("def f(): 1 + (yield)*2")
self.check_suite("def f(): (yield 1)*2")
self.check_suite("def f(): return; yield 1")
self.check_suite("def f(): yield 1; return")
self.check_suite("def f(): yield from 1")
self.check_suite("def f(): x = yield from 1")
self.check_suite("def f(): f((yield from 1))")
self.check_suite("def f(): yield 1; return 1")
self.check_suite("def f():\n"
" for x in range(30):\n"
" yield x\n")
self.check_suite("def f():\n"
" if (yield):\n"
" yield x\n")
def test_nonlocal_statement(self):
self.check_suite("def f():\n"
" x = 0\n"
" def g():\n"
" nonlocal x\n")
self.check_suite("def f():\n"
" x = y = 0\n"
" def g():\n"
" nonlocal x, y\n")
def test_expressions(self):
self.check_expr("foo(1)")
self.check_expr("[1, 2, 3]")
self.check_expr("[x**3 for x in range(20)]")
self.check_expr("[x**3 for x in range(20) if x % 3]")
self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
self.check_expr("list(x**3 for x in range(20))")
self.check_expr("list(x**3 for x in range(20) if x % 3)")
self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
self.check_expr("foo(*args)")
self.check_expr("foo(*args, **kw)")
self.check_expr("foo(**kw)")
self.check_expr("foo(key=value)")
self.check_expr("foo(key=value, *args)")
self.check_expr("foo(key=value, *args, **kw)")
self.check_expr("foo(key=value, **kw)")
self.check_expr("foo(a, b, c, *args)")
self.check_expr("foo(a, b, c, *args, **kw)")
self.check_expr("foo(a, b, c, **kw)")
self.check_expr("foo(a, *args, keyword=23)")
self.check_expr("foo + bar")
self.check_expr("foo - bar")
self.check_expr("foo * bar")
self.check_expr("foo / bar")
self.check_expr("foo // bar")
self.check_expr("lambda: 0")
self.check_expr("lambda x: 0")
self.check_expr("lambda *y: 0")
self.check_expr("lambda *y, **z: 0")
self.check_expr("lambda **z: 0")
self.check_expr("lambda x, y: 0")
self.check_expr("lambda foo=bar: 0")
self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
self.check_expr("lambda foo=bar, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
self.check_expr("lambda x, *y, **z: 0")
self.check_expr("(x for x in range(10))")
self.check_expr("foo(x for x in range(10))")
self.check_expr("...")
self.check_expr("a[...]")
def test_simple_expression(self):
# expr_stmt
self.check_suite("a")
def test_simple_assignments(self):
self.check_suite("a = b")
self.check_suite("a = b = c = d = e")
def test_simple_augmented_assignments(self):
self.check_suite("a += b")
self.check_suite("a -= b")
self.check_suite("a *= b")
self.check_suite("a /= b")
self.check_suite("a //= b")
self.check_suite("a %= b")
self.check_suite("a &= b")
self.check_suite("a |= b")
self.check_suite("a ^= b")
self.check_suite("a <<= b")
self.check_suite("a >>= b")
self.check_suite("a **= b")
def test_function_defs(self):
self.check_suite("def f(): pass")
self.check_suite("def f(*args): pass")
self.check_suite("def f(*args, **kw): pass")
self.check_suite("def f(**kw): pass")
self.check_suite("def f(foo=bar): pass")
self.check_suite("def f(foo=bar, *args): pass")
self.check_suite("def f(foo=bar, *args, **kw): pass")
self.check_suite("def f(foo=bar, **kw): pass")
self.check_suite("def f(a, b): pass")
self.check_suite("def f(a, b, *args): pass")
self.check_suite("def f(a, b, *args, **kw): pass")
self.check_suite("def f(a, b, **kw): pass")
self.check_suite("def f(a, b, foo=bar): pass")
self.check_suite("def f(a, b, foo=bar, *args): pass")
self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
self.check_suite("def f(a, b, foo=bar, **kw): pass")
self.check_suite("@staticmethod\n"
"def f(): pass")
self.check_suite("@staticmethod\n"
"@funcattrs(x, y)\n"
"def f(): pass")
self.check_suite("@funcattrs()\n"
"def f(): pass")
# keyword-only arguments
self.check_suite("def f(*, a): pass")
self.check_suite("def f(*, a = 5): pass")
self.check_suite("def f(*, a = 5, b): pass")
self.check_suite("def f(*, a, b = 5): pass")
self.check_suite("def f(*, a, b = 5, **kwds): pass")
self.check_suite("def f(*args, a): pass")
self.check_suite("def f(*args, a = 5): pass")
self.check_suite("def f(*args, a = 5, b): pass")
self.check_suite("def f(*args, a, b = 5): pass")
self.check_suite("def f(*args, a, b = 5, **kwds): pass")
# function annotations
self.check_suite("def f(a: int): pass")
self.check_suite("def f(a: int = 5): pass")
self.check_suite("def f(*args: list): pass")
self.check_suite("def f(**kwds: dict): pass")
self.check_suite("def f(*, a: int): pass")
self.check_suite("def f(*, a: int = 5): pass")
self.check_suite("def f() -> int: pass")
def test_class_defs(self):
self.check_suite("class foo():pass")
self.check_suite("class foo(object):pass")
self.check_suite("@class_decorator\n"
"class foo():pass")
self.check_suite("@class_decorator(arg)\n"
"class foo():pass")
self.check_suite("@decorator1\n"
"@decorator2\n"
"class foo():pass")
def test_import_from_statement(self):
self.check_suite("from sys.path import *")
self.check_suite("from sys.path import dirname")
self.check_suite("from sys.path import (dirname)")
self.check_suite("from sys.path import (dirname,)")
self.check_suite("from sys.path import dirname as my_dirname")
self.check_suite("from sys.path import (dirname as my_dirname)")
self.check_suite("from sys.path import (dirname as my_dirname,)")
self.check_suite("from sys.path import dirname, basename")
self.check_suite("from sys.path import (dirname, basename)")
self.check_suite("from sys.path import (dirname, basename,)")
self.check_suite(
"from sys.path import dirname as my_dirname, basename")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename)")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename,)")
self.check_suite(
"from sys.path import dirname, basename as my_basename")
self.check_suite(
"from sys.path import (dirname, basename as my_basename)")
self.check_suite(
"from sys.path import (dirname, basename as my_basename,)")
self.check_suite("from .bogus import x")
def test_basic_import_statement(self):
self.check_suite("import sys")
self.check_suite("import sys as system")
self.check_suite("import sys, math")
self.check_suite("import sys as system, math")
self.check_suite("import sys, math as my_math")
def test_relative_imports(self):
self.check_suite("from . import name")
self.check_suite("from .. import name")
# check all the way up to '....', since '...' is tokenized
# differently from '.' (it's an ellipsis token).
self.check_suite("from ... import name")
self.check_suite("from .... import name")
self.check_suite("from .pkg import name")
self.check_suite("from ..pkg import name")
self.check_suite("from ...pkg import name")
self.check_suite("from ....pkg import name")
def test_pep263(self):
self.check_suite("# -*- coding: iso-8859-1 -*-\n"
"pass\n")
def test_assert(self):
self.check_suite("assert alo < ahi and blo < bhi\n")
def test_with(self):
self.check_suite("with open('x'): pass\n")
self.check_suite("with open('x') as f: pass\n")
self.check_suite("with open('x') as f, open('y') as g: pass\n")
def test_try_stmt(self):
self.check_suite("try: pass\nexcept: pass\n")
self.check_suite("try: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nexcept: pass\n"
"finally: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n"
"finally: pass\n")
def test_position(self):
# An absolutely minimal test of position information. Better
# tests would be a big project.
code = "def f(x):\n return x + 1"
st1 = parser.suite(code)
st2 = st1.totuple(line_info=1, col_info=1)
def walk(tree):
node_type = tree[0]
next = tree[1]
if isinstance(next, tuple):
for elt in tree[1:]:
for x in walk(elt):
yield x
else:
yield tree
terminals = list(walk(st2))
self.assertEqual([
(1, 'def', 1, 0),
(1, 'f', 1, 4),
(7, '(', 1, 5),
(1, 'x', 1, 6),
(8, ')', 1, 7),
(11, ':', 1, 8),
(4, '', 1, 9),
(5, '', 2, -1),
(1, 'return', 2, 4),
(1, 'x', 2, 11),
(14, '+', 2, 13),
(2, '1', 2, 15),
(4, '', 2, 16),
(6, '', 2, -1),
(4, '', 2, -1),
(0, '', 2, -1)],
terminals)
def test_extended_unpacking(self):
self.check_suite("*a = y")
self.check_suite("x, *b, = m")
self.check_suite("[*a, *b] = y")
self.check_suite("for [*x, b] in x: pass")
def test_raise_statement(self):
self.check_suite("raise\n")
self.check_suite("raise e\n")
self.check_suite("try:\n"
" suite\n"
"except Exception as e:\n"
" raise ValueError from e\n")
def test_set_displays(self):
self.check_expr('{2}')
self.check_expr('{2,}')
self.check_expr('{2, 3}')
self.check_expr('{2, 3,}')
def test_dict_displays(self):
self.check_expr('{}')
self.check_expr('{a:b}')
self.check_expr('{a:b,}')
self.check_expr('{a:b, c:d}')
self.check_expr('{a:b, c:d,}')
def test_set_comprehensions(self):
self.check_expr('{x for x in seq}')
self.check_expr('{f(x) for x in seq}')
self.check_expr('{f(x) for x in seq if condition(x)}')
def test_dict_comprehensions(self):
self.check_expr('{x:x for x in seq}')
self.check_expr('{x**2:x[3] for x in seq if condition(x)}')
self.check_expr('{x:x for x in seq1 for y in seq2 if condition(x, y)}')
#
# Second, we take *invalid* trees and make sure we get ParserError
# rejections for them.
#
class IllegalSyntaxTestCase(unittest.TestCase):
def check_bad_tree(self, tree, label):
try:
parser.sequence2st(tree)
except parser.ParserError:
pass
else:
self.fail("did not detect invalid tree for %r" % label)
def test_junk(self):
# not even remotely valid:
self.check_bad_tree((1, 2, 3), "<junk>")
def test_illegal_yield_1(self):
# Illegal yield statement: def f(): return 1; yield 1
tree = \
(257,
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_illegal_yield_2(self):
# Illegal return in generator: def f(): return 1; yield 1
tree = \
(257,
(264,
(265,
(266,
(278,
(1, 'from'),
(281, (1, '__future__')),
(1, 'import'),
(279, (1, 'generators')))),
(4, ''))),
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_a_comma_comma_c(self):
# Illegal input: a,,c
tree = \
(258,
(311,
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
(12, ','),
(12, ','),
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "a,,c")
def test_illegal_operator(self):
# Illegal input: a $= b
tree = \
(257,
(264,
(265,
(266,
(267,
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
(268, (37, '$=')),
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
(4, ''))),
(0, ''))
self.check_bad_tree(tree, "a $= b")
def test_malformed_global(self):
#doesn't have global keyword in ast
tree = (257,
(264,
(265,
(266,
(282, (1, 'foo'))), (4, ''))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "malformed global ast")
def test_missing_import_source(self):
# from import fred
tree = \
(257,
(268,
(269,
(270,
(282,
(284, (1, 'from'), (1, 'import'),
(287, (285, (1, 'fred')))))),
(4, ''))),
(4, ''), (0, ''))
self.check_bad_tree(tree, "from import fred")
class CompileTestCase(unittest.TestCase):
# These tests are very minimal. :-(
def test_compile_expr(self):
st = parser.expr('2 + 3')
code = parser.compilest(st)
self.assertEqual(eval(code), 5)
def test_compile_suite(self):
st = parser.suite('x = 2; y = x + 3')
code = parser.compilest(st)
globs = {}
exec(code, globs)
self.assertEqual(globs['y'], 5)
def test_compile_error(self):
st = parser.suite('1 = 3 + 4')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_compile_badunicode(self):
st = parser.suite('a = "\\U12345678"')
self.assertRaises(SyntaxError, parser.compilest, st)
st = parser.suite('a = "\\u1"')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_issue_9011(self):
# Issue 9011: compilation of an unary minus expression changed
# the meaning of the ST, so that a second compilation produced
# incorrect results.
st = parser.expr('-3')
code1 = parser.compilest(st)
self.assertEqual(eval(code1), -3)
code2 = parser.compilest(st)
self.assertEqual(eval(code2), -3)
class ParserStackLimitTestCase(unittest.TestCase):
"""try to push the parser to/over its limits.
see http://bugs.python.org/issue1881 for a discussion
"""
def _nested_expression(self, level):
return "["*level+"]"*level
def test_deeply_nested_list(self):
# XXX used to be 99 levels in 2.x
e = self._nested_expression(93)
st = parser.expr(e)
st.compile()
def test_trigger_memory_error(self):
e = self._nested_expression(100)
rc, out, err = assert_python_failure('-c', e)
# parsing the expression will result in an error message
# followed by a MemoryError (see #11963)
self.assertIn(b's_push: parser stack overflow', err)
self.assertIn(b'MemoryError', err)
class STObjectTestCase(unittest.TestCase):
"""Test operations on ST objects themselves"""
def test_comparisons(self):
# ST objects should support order and equality comparisons
st1 = parser.expr('2 + 3')
st2 = parser.suite('x = 2; y = x + 3')
st3 = parser.expr('list(x**3 for x in range(20))')
st1_copy = parser.expr('2 + 3')
st2_copy = parser.suite('x = 2; y = x + 3')
st3_copy = parser.expr('list(x**3 for x in range(20))')
# exercise fast path for object identity
self.assertEqual(st1 == st1, True)
self.assertEqual(st2 == st2, True)
self.assertEqual(st3 == st3, True)
# slow path equality
self.assertEqual(st1, st1_copy)
self.assertEqual(st2, st2_copy)
self.assertEqual(st3, st3_copy)
self.assertEqual(st1 == st2, False)
self.assertEqual(st1 == st3, False)
self.assertEqual(st2 == st3, False)
self.assertEqual(st1 != st1, False)
self.assertEqual(st2 != st2, False)
self.assertEqual(st3 != st3, False)
self.assertEqual(st1 != st1_copy, False)
self.assertEqual(st2 != st2_copy, False)
self.assertEqual(st3 != st3_copy, False)
self.assertEqual(st2 != st1, True)
self.assertEqual(st1 != st3, True)
self.assertEqual(st3 != st2, True)
# we don't particularly care what the ordering is; just that
# it's usable and self-consistent
self.assertEqual(st1 < st2, not (st2 <= st1))
self.assertEqual(st1 < st3, not (st3 <= st1))
self.assertEqual(st2 < st3, not (st3 <= st2))
self.assertEqual(st1 < st2, st2 > st1)
self.assertEqual(st1 < st3, st3 > st1)
self.assertEqual(st2 < st3, st3 > st2)
self.assertEqual(st1 <= st2, st2 >= st1)
self.assertEqual(st3 <= st1, st1 >= st3)
self.assertEqual(st2 <= st3, st3 >= st2)
# transitivity
bottom = min(st1, st2, st3)
top = max(st1, st2, st3)
mid = sorted([st1, st2, st3])[1]
self.assertTrue(bottom < mid)
self.assertTrue(bottom < top)
self.assertTrue(mid < top)
self.assertTrue(bottom <= mid)
self.assertTrue(bottom <= top)
self.assertTrue(mid <= top)
self.assertTrue(bottom <= bottom)
self.assertTrue(mid <= mid)
self.assertTrue(top <= top)
# interaction with other types
self.assertEqual(st1 == 1588.602459, False)
self.assertEqual('spanish armada' != st2, True)
self.assertRaises(TypeError, operator.ge, st3, None)
self.assertRaises(TypeError, operator.le, False, st1)
self.assertRaises(TypeError, operator.lt, st1, 1815)
self.assertRaises(TypeError, operator.gt, b'waterloo', st2)
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
def XXXROUNDUP(n):
if n <= 1:
return n
if n <= 128:
return (n + 3) & ~3
return 1 << (n - 1).bit_length()
basesize = support.calcobjsize('Pii')
nodesize = struct.calcsize('hP3iP0h')
def sizeofchildren(node):
if node is None:
return 0
res = 0
hasstr = len(node) > 1 and isinstance(node[-1], str)
if hasstr:
res += len(node[-1]) + 1
children = node[1:-1] if hasstr else node[1:]
if children:
res += XXXROUNDUP(len(children)) * nodesize
for child in children:
res += sizeofchildren(child)
return res
def check_st_sizeof(st):
self.check_sizeof(st, basesize + nodesize +
sizeofchildren(st.totuple()))
check_st_sizeof(parser.expr('2 + 3'))
check_st_sizeof(parser.expr('2 + 3 + 4'))
check_st_sizeof(parser.suite('x = 2 + 3'))
check_st_sizeof(parser.suite(''))
check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
# XXX tests for pickling and unpickling of ST objects should go here
class OtherParserCase(unittest.TestCase):
def test_two_args_to_expr(self):
# See bug #12264
with self.assertRaises(TypeError):
parser.expr("a", "b")
def test_main():
support.run_unittest(
RoundtripLegalSyntaxTestCase,
IllegalSyntaxTestCase,
CompileTestCase,
ParserStackLimitTestCase,
STObjectTestCase,
OtherParserCase,
)
if __name__ == "__main__":
test_main()
|
lgpl-3.0
| 7,136,389,823,487,868,000 | 982,259,298,855,129,100 | 34.052349 | 79 | 0.467833 | false |
kritak/textdungeon
|
Internal/pricerandomtester.py
|
1
|
1114
|
"""testing random frequency of items based on price for item.
a cheap item is more common, a expensive item is very rare"""
import random
d = {"healing":50,
"berserk":60,
"clever":100,
"swiftness":100,
"might":100,
"awesomeness":500,
}
# invert d: weight each drink by 1/price, so cheaper drinks are more likely
dr = [[1/b,a] for [a,b] in d.items()] # list of [1/price, drinkname]
dr.sort() # sort this list by weight
pricelist1 = [a for [a,b] in dr] # list of weights only
drinklist = [b for [a,b] in dr] # list of drinkname only
pricelist2 = [] # list of added-up (cumulative) weights
kprice = 0
for p in pricelist1:
kprice += p
pricelist2.append(kprice)
print(pricelist1, pricelist2)
result = {}
print("calculating please wait...")
for x in range(10000):
    y = random.random()*(pricelist2[-1]) # 0 to max cumulative weight
for p in pricelist2:
if y < p:
drinkname = drinklist[pricelist2.index(p)]
if drinkname in result:
result[drinkname] += 1
else:
result[drinkname] = 1
break
print(result)
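# Minimal alternative sketch (assumes Python 3.6+ for random.choices; not part
# of the original experiment): the same inverse-price weighting in one call.
def weighted_counts(prices, n=10000):
    names = list(prices)
    weights = [1 / prices[name] for name in names]  # cheaper -> more likely
    picks = random.choices(names, weights=weights, k=n)
    return {name: picks.count(name) for name in names}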
|
gpl-2.0
| -7,923,231,568,049,648,000 | 2,831,592,949,048,845,000 | 24.906977 | 66 | 0.561939 | false |
naototty/pyflag
|
src/plugins_old/MemoryForensics/VolatilityCommon.py
|
7
|
3679
|
import os,sys
import pyflag.IO as IO
import pyflag.FlagFramework as FlagFramework
## Find and insert the volatility modules
volatility_path = None
for d in os.listdir(os.path.dirname(__file__)):
if d.startswith("Volatility-1.3"):
## Check that volatility is actually in there
path = os.path.join(os.path.dirname(__file__),d)
if os.access(os.path.join(path,"vtypes.py"),os.F_OK):
volatility_path = path
break
## We need to make sure that we get in before an older version
if volatility_path and volatility_path not in sys.path:
sys.path.insert(0,volatility_path)
import forensics.addrspace
## This is a big hack because Volatility is difficult to work with -
## we want to pass Volatility an already made address space but there
## is no way to do this. Volatility calls the FileAddressSpace in
## multiple places and actually tries to open the raw file several
## times. We would essentially need to recode all the volatility
## functions to accept a ready address space.
## But after all, this is python so we can do lots of magic. We
## basically dynamically change the FileAddressSpace definition in
## volatility itself (which normally accepts a filename) to accept an
## IOSource name, then we effectively call it with the name as an
## image name. When volatility tries to open the said filename, it
## will be transparently opening a PyFlag iosource of our choosing.
try:
forensics.addrspace.FileAddressSpace.case
except AttributeError:
    ## Only do this if it's not done already
class IOSourceAddressSpace(forensics.addrspace.FileAddressSpace):
case = None
iosource = None
def __init__(self, name, mode='rb', fast=False):
self.case, self.iosource = name.split("/",1)
fd = IO.open(self.case, self.iosource)
self.fhandle = fd
self.fsize = fd.size
self.fast_fhandle = fd
self.fname = name
self.name = name
## Patch it in:
forensics.addrspace.FileAddressSpace = IOSourceAddressSpace
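# Illustrative sketch (hypothetical case and iosource names): after the patch,
# Volatility code that "opens a file" is really opening a PyFlag IO source,
# addressed as "<case>/<iosource>".
def _patched_addrspace_example():
    return forensics.addrspace.FileAddressSpace("demo_case/memory_image")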
## We need to reimplement these functions in a sane way (Currently
## they try to open the file directly)
import vutils
def is_crash_dump(filename):
fd = forensics.addrspace.FileAddressSpace(filename)
if fd.read(0, 8) == "PAGEDUMP":
return True
return False
def is_hiberfil(filename):
fd = forensics.addrspace.FileAddressSpace(filename)
if fd.read(0, 4) == 'hibr':
return True
return False
vutils.is_crash_dump = is_crash_dump
vutils.is_hiberfil = is_hiberfil
## Make sure we initialise Volatility plugins
import forensics.registry as MemoryRegistry
MemoryRegistry.Init()
## These are common column types
from pyflag.ColumnTypes import BigIntegerType
class MemoryOffset(BigIntegerType):
inode_id_column = "Inode"
""" A column type to link to the offset of an inode """
def plain_display_hook(self, value, row, result):
offset = int(value)
inode_id = row[self.inode_id_column]
target = FlagFramework.query_type(family="Disk Forensics",
report="ViewFile",
offset=value,
inode_id=inode_id,
case=self.case,
mode="HexDump")
try:
target['_prebuffer'] = self.prebuffer
except AttributeError: pass
result.link("0x%08X" % offset, target=target, pane='new')
display_hooks = [ plain_display_hook ]
|
gpl-2.0
| 8,229,102,632,975,477,000 | -4,294,478,697,774,036,000 | 35.79 | 70 | 0.64012 | false |
SPARLab/BikeMaps
|
mapApp/views/__init__.py
|
1
|
1138
|
from .about import about, contact
from .alerts import alertUsers, postAlertPolygon, readAlertPoint
from .disclaimer import disclaimer
from .edit import editHazards, editShape, updateHazard
from .index import index
from .postPoint import (postHazard, postIncident, postNearmiss,
postNewInfrastructure, postTheft)
from .pushNotification import pushNotification
from .recentReports import recentReports
from .restApi import (AlertAreaDetail, AlertAreaList, APNSDeviceDetail,
APNSDeviceList, CollisionList, FilteredHazardList,
FilteredTheftList, GCMDeviceDetail, GCMDeviceList,
HazardList, IncidentList, NearmissList, OfficialList,
TheftList, TinyCollisionList, TinyHazardList,
TinyNearMissList, TinyNewInfrastructureList,
TinyTheftList, UserDetail, UserList, XHRCollisionInfo,
XHRHazardInfo, XHRNearMissInfo, XHRNewInfrastructureInfo,
XHRTheftInfo)
from .termsAndConditions import termsAndConditions
from .vis import vis
|
mit
| -4,138,076,837,048,177,700 | -286,802,703,473,359,140 | 54.9 | 79 | 0.695079 | false |
JulianAtGitHub/CocosBuilderExtend
|
CocosBuilder/libs/cocos2d-iphone/tools/template_generator.py
|
46
|
8351
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Xcode 4 template generator for cocos2d project
# (c) 2011 Ricardo Quesada
#
# LICENSE: Dual License: MIT & GNU GPL v2 Whatever suits you best.
#
# Given a directory, it generates the "Definitions" and "Nodes" elements
#
# Format taken from: http://blog.boreal-kiss.net/2011/03/11/a-minimal-project-template-for-xcode-4/
# ----------------------------------------------------------------------------
'''
Xcode 4 template generator
'''
__docformat__ = 'restructuredtext'
_template_open_body = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!-- FILE AUTOGENERATED BY cocos2d/tools/template_generator.py DO NOT EDIT -->
<plist version="1.0">
<dict>
<key>Description</key>
<string>This is a template description.</string>
<key>Identifier</key>
<string>com.cocos2d-v2.%s</string>
<key>Kind</key>
<string>Xcode.Xcode3.ProjectTemplateUnitKind</string>"""
_template_close_body = "</dict>\n</plist>"
_template_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
_template_user_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>ALWAYS_SEARCH_USER_PATHS</key>
<string>YES</string>
<key>USER_HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
_template_ancestor = """ <key>Ancestors</key>
<array>
<string>%s</string>
</array>"""
# python
import sys
import os
import getopt
import glob
class Xcode4Template(object):
def __init__(self, directory, group=0, identifier="XXX", header_path=None, user_header_path=None, ancestor=None):
self.directory = directory
self.files_to_include = []
self.wildcard = '*'
self.ignore_extensions = ['h','txt','html','patch','cmake', 'py', 'markdown', 'md', 'graffle', 'sh', 'ini', 'bridgesupport', 'tbl', 'msg']
self.ignore_directories = ['docs', 'html']
self.group_start_index = group # eg: if 1 then libs/cocos2d/support -> ["cocos2d", "support"] ignoring "libs"
self.output = []
self.identifier = identifier
self.header_path = header_path
self.user_header_path = user_header_path
self.ancestor = ancestor
def scandirs(self, path):
for currentFile in glob.glob(os.path.join(path, self.wildcard)):
if os.path.isdir(currentFile):
self.scandirs(currentFile)
else:
self.files_to_include.append(currentFile)
#
# append the definitions
#
def append_definition(self, output_body, path, group, dont_index):
output_body.append("\t\t<key>%s</key>" % path)
output_body.append("\t\t<dict>")
if group:
output_body.append("\t\t\t<key>Group</key>")
output_body.append("\t\t\t<array>")
for g in group:
output_body.append("\t\t\t\t<string>%s</string>" % g)
output_body.append("\t\t\t</array>")
output_body.append("\t\t\t<key>Path</key>\n\t\t\t<string>%s</string>" % path)
if dont_index:
output_body.append("\t\t\t<key>TargetIndices</key>\n\t\t\t<array/>")
output_body.append("\t\t</dict>")
#
# Generate the "Definitions" section
#
def generate_definitions(self):
output_header = "\t<key>Definitions</key>"
output_dict_open = "\t<dict>"
output_dict_close = "\t</dict>"
output_body = []
for path in self.files_to_include:
# group name
group = []
# obtain group name from directory
dirs = os.path.dirname(path)
lastdir = dirs.split(os.path.sep)[-1]
if lastdir in self.ignore_directories:
sys.stderr.write('Ignoring definition: "%s" because it is in directory: "%s"\n' % (os.path.basename(path), lastdir))
continue
group = dirs.split('/')
group = group[self.group_start_index:]
# get the extension
filename = os.path.basename(path)
name_extension= filename.split('.')
extension = None
if len(name_extension) == 2:
extension = name_extension[1]
self.append_definition(output_body, path, group, extension in self.ignore_extensions)
self.output.append(output_header)
self.output.append(output_dict_open)
self.output.append("\n".join(output_body))
self.output.append(output_dict_close)
#
# Generates the "Nodes" section
#
def generate_nodes(self):
output_header = "\t<key>Nodes</key>"
output_open = "\t<array>"
output_close = "\t</array>"
output_body = []
for path in self.files_to_include:
lastdir = os.path.dirname(path).split(os.path.sep)[-1]
if lastdir in self.ignore_directories:
sys.stderr.write('Ignoring node: "%s" because it is in directory: "%s"\n' % (os.path.basename(path), lastdir))
continue
output_body.append("\t\t<string>%s</string>" % path)
self.output.append(output_header)
self.output.append(output_open)
self.output.append("\n".join(output_body))
self.output.append(output_close)
#
# Generate ancestors
#
def generate_ancestor(self):
if self.ancestor:
self.output.append(_template_ancestor % self.ancestor)
#
# Generates the include directory
#
def generate_header_path(self):
if self.header_path:
self.output.append(_template_header_path % self.header_path)
if self.user_header_path:
self.output.append(_template_user_header_path % self.user_header_path)
#
# Generates the plist. Send it to to stdout
#
def generate_xml(self):
self.output.append(_template_open_body % self.identifier)
self.generate_ancestor()
self.generate_definitions()
self.generate_nodes()
self.generate_header_path()
self.output.append(_template_close_body)
print "\n".join(self.output)
def generate(self):
self.scandirs(self.directory)
self.generate_xml()
def help():
    print "%s v1.1 - A utility to generate Xcode 4 templates" % sys.argv[0]
print "Usage:"
print "-g --group\t\tdirectory_used_as_starting_group (if 1, then 'libs/cocos2d/Support/' -> ['cocos2d','Support'] ignoring 'libs')"
print "-i --identifier\t\tidentifier (Xcode4 template identifier)"
print "-a --ancestor\t\tancestor identifier. Default: none"
print "--header-path\t\theader search path"
print "--user-header-path\tuser header search path"
print "directory_to_parse"
print "\nExample:"
print "\t%s -i kazmathlib --header-path ___PACKAGENAME___/libs/kazmath/include libs" % sys.argv[0]
print "\t%s -i cocos2dlib libs" % sys.argv[0]
sys.exit(-1)
if __name__ == "__main__":
if len(sys.argv) == 1:
help()
directory = None
group = 0
identifier = None
header_path= None
user_header_path= None
ancestor = None
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "a:g:i:", ["ancestor=","group=","identifier=","header-path=", "user-header-path="])
if len(args) == 0:
help()
for opt, arg in opts:
if opt in ("-g","--group"):
group = arg
if opt in ("-i","--identifier"):
identifier = arg
if opt in ["--header-path"]:
header_path= arg
if opt in ["--user-header-path"]:
user_header_path= arg
if opt in ("-a", "--ancestor"):
ancestor = arg
except getopt.GetoptError,e:
print e
directory = args[0]
if directory == None:
help()
gen = Xcode4Template(directory=directory, group=int(group), identifier=identifier, header_path=header_path, user_header_path=user_header_path, ancestor=ancestor)
gen.generate()
|
mit
| -3,885,548,080,653,279,700 | 2,346,572,012,631,408,000 | 30.996169 | 165 | 0.579212 | false |
unt-libraries/django-name
|
name/api/serializers.py
|
1
|
6208
|
"""Serializers for the Name App Models.
This module leverages the Django Rest Framework's Serializer
components to build JSON representations of the models defined
in this app.
These JSON representations are designed to be backwards compatible
with the API documented in previous versions.
For documentation regarding the Django Rest Framework Serializers go
to http://www.django-rest-framework.org/api-guide/serializers/
"""
from rest_framework import serializers
from .. import models
class IdentifierSerializer(serializers.ModelSerializer):
"""Serializer for the Identifier Model.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
label -> identifier.type
href -> identifier.value
"""
label = serializers.StringRelatedField(source='type')
href = serializers.CharField(source='value')
class Meta:
model = models.Identifier
fields = ('label', 'href')
class NoteSerializer(serializers.ModelSerializer):
"""Serializer for the Note Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Note
fields = ('note', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Note Type label, instead of the Note Type ID, which
is the default behavior.
"""
return obj.get_note_type_label().lower()
class VariantSerializer(serializers.ModelSerializer):
"""Serializer for the Variant Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Variant
fields = ('variant', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Variant Type label, instead of the Variant Type ID,
which is the default behavior.
"""
return obj.get_variant_type_label().lower()
class NameSerializer(serializers.ModelSerializer):
"""Serializer for the Name Model.
    This serializes the Name model to include detailed information
about the object, including the related Variants, Notes, and
Identifiers.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
authoritative_name -> name.name
begin_date -> name.begin
end_date -> name.end
The identifier field is the absolute url to the name detail
page for the model instance.
"""
authoritative_name = serializers.CharField(source='name')
begin_date = serializers.CharField(source='begin')
name_type = serializers.SerializerMethodField()
end_date = serializers.CharField(source='end')
links = IdentifierSerializer(many=True, source='identifier_set')
notes = NoteSerializer(many=True, source='note_set')
variants = VariantSerializer(many=True, source='variant_set')
identifier = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('authoritative_name', 'name_type', 'begin_date', 'end_date',
'identifier', 'links', 'notes', 'variants',)
def get_name_type(self, obj):
"""Sets the name_type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
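# Illustrative sketch of the JSON shape produced for one Name (all values are
# hypothetical; real output depends on the database row and request host):
EXAMPLE_NAME_PAYLOAD = {
    'authoritative_name': 'University of North Texas',
    'name_type': 'organization',
    'begin_date': '1890',
    'end_date': '',
    'identifier': 'http://example.org/name/nm0000002/',
    'links': [{'label': 'homepage', 'href': 'http://www.unt.edu'}],
    'notes': [],
    'variants': [{'variant': 'UNT', 'type': 'acronym'}],
}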
class NameSearchSerializer(serializers.ModelSerializer):
"""Name Model Serializer for the Name search/autocompletion
endpoint.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
begin_date -> name.begin
type -> name.get_name_type_label()
label -> Formats name.name and name.disambiguation.
The URL field is the absolute url to the name detail page for
the model instance.
"""
begin_date = serializers.CharField(source='begin')
type = serializers.SerializerMethodField()
label = serializers.SerializerMethodField()
URL = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('id', 'name', 'label', 'type', 'begin_date',
'disambiguation', 'URL')
def get_type(self, obj):
"""Sets the type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
def get_label(self, obj):
"""Sets the label field.
Returns a string in the form of
"<name.name> (<name.disambiguation>)"
"""
if obj.disambiguation:
return '{0} ({1})'.format(obj.name, obj.disambiguation)
return obj.name
class LocationSerializer(serializers.ModelSerializer):
"""Serailizer for the Locations Model.
This includes the related Name via the belong_to_name field. The
belong_to_name field uses the NameSerializer to nest the related
Name model.
"""
belong_to_name = NameSerializer()
class Meta:
model = models.Location
fields = '__all__'
class NameStatisticsMonthSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsMonth object."""
total = serializers.IntegerField()
total_to_date = serializers.IntegerField()
month = serializers.DateTimeField()
class NameStatisticsTypeSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsType object.
    This serializer utilizes the NameStatisticsMonthSerializer to serialize
    the NameStatisticsMonth instances that the object instance contains.
"""
running_total = serializers.IntegerField()
stats = NameStatisticsMonthSerializer(many=True)
class NameStatisticsSerializer(serializers.Serializer):
"""Serializer for the NameStatistics object.
This serializer utilizes the NameStatisticsTypeSerializer to
serialize the NameStatisticsType instances that the object instance
contains.
"""
created = NameStatisticsTypeSerializer()
modified = NameStatisticsTypeSerializer()
name_type_totals = serializers.DictField()
|
bsd-3-clause
| -658,798,574,419,687,200 | 5,119,492,834,971,211,000 | 31.502618 | 78 | 0.6875 | false |
ychen820/microblog
|
y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/ec2/autoscale/policy.py
|
13
|
6223
|
# Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
class Alarm(object):
def __init__(self, connection=None):
self.connection = connection
self.name = None
self.alarm_arn = None
def __repr__(self):
return 'Alarm:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'AlarmARN':
self.alarm_arn = value
else:
setattr(self, name, value)
class AdjustmentType(object):
def __init__(self, connection=None):
self.connection = connection
self.adjustment_type = None
def __repr__(self):
return 'AdjustmentType:%s' % self.adjustment_type
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == 'AdjustmentType':
self.adjustment_type = value
return
class MetricCollectionTypes(object):
class BaseType(object):
arg = ''
def __init__(self, connection):
self.connection = connection
self.val = None
def __repr__(self):
return '%s:%s' % (self.arg, self.val)
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == self.arg:
self.val = value
class Metric(BaseType):
arg = 'Metric'
class Granularity(BaseType):
arg = 'Granularity'
def __init__(self, connection=None):
self.connection = connection
self.metrics = []
self.granularities = []
def __repr__(self):
return 'MetricCollectionTypes:<%s, %s>' % (self.metrics, self.granularities)
def startElement(self, name, attrs, connection):
if name == 'Granularities':
self.granularities = ResultSet([('member', self.Granularity)])
return self.granularities
elif name == 'Metrics':
self.metrics = ResultSet([('member', self.Metric)])
return self.metrics
def endElement(self, name, value, connection):
return
class ScalingPolicy(object):
def __init__(self, connection=None, **kwargs):
"""
Scaling Policy
:type name: str
:param name: Name of scaling policy.
:type adjustment_type: str
:param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`.
:type as_name: str or int
:param as_name: Name or ARN of the Auto Scaling Group.
:type scaling_adjustment: int
:param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).
:type min_adjustment_step: int
        :param min_adjustment_step: Value of min adjustment step required to
            apply the scaling policy (only makes sense when using `PercentChangeInCapacity` as adjustment_type).
:type cooldown: int
:param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
"""
self.name = kwargs.get('name', None)
self.adjustment_type = kwargs.get('adjustment_type', None)
self.as_name = kwargs.get('as_name', None)
self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
self.cooldown = kwargs.get('cooldown', None)
self.connection = connection
self.min_adjustment_step = kwargs.get('min_adjustment_step', None)
def __repr__(self):
return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
self.as_name,
self.adjustment_type)
def startElement(self, name, attrs, connection):
if name == 'Alarms':
self.alarms = ResultSet([('member', Alarm)])
return self.alarms
def endElement(self, name, value, connection):
if name == 'PolicyName':
self.name = value
elif name == 'AutoScalingGroupName':
self.as_name = value
elif name == 'PolicyARN':
self.policy_arn = value
elif name == 'ScalingAdjustment':
self.scaling_adjustment = int(value)
elif name == 'Cooldown':
self.cooldown = int(value)
elif name == 'AdjustmentType':
self.adjustment_type = value
elif name == 'MinAdjustmentStep':
self.min_adjustment_step = int(value)
def delete(self):
return self.connection.delete_policy(self.name, self.as_name)
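# Minimal usage sketch (group name, values and the connection object are
# hypothetical): a policy that grows the group by two instances per trigger.
def _example_scale_out_policy(connection):
    return ScalingPolicy(connection,
                         name='scale-out',
                         adjustment_type='ChangeInCapacity',
                         as_name='web-asg',
                         scaling_adjustment=2,
                         cooldown=300)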
class TerminationPolicies(list):
def __init__(self, connection=None, **kwargs):
pass
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'member':
self.append(value)
|
bsd-3-clause
| 778,116,182,696,612,700 | -4,373,380,596,070,613,000 | 34.764368 | 149 | 0.624618 | false |
ecugol/django-geoip
|
django_geoip/vendor/progressbar/progressbar.py
|
3
|
9159
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# progressbar - Text progress bar library for Python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Main ProgressBar class."""
from __future__ import division
import math
import os
import signal
import sys
import time
from . import widgets
try:
from fcntl import ioctl
from array import array
import termios
except ImportError:
pass
class UnknownLength: pass
class ProgressBar(object):
"""The ProgressBar class which updates and prints the bar.
A common way of using it is like:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
You can also use a ProgressBar as an iterator:
>>> progress = ProgressBar()
>>> for i in progress(some_iterable):
... # do something
...
Since the progress bar is incredibly customizable you can specify
different widgets of any type in any order. You can even write your own
widgets! However, since there are already a good number of widgets you
should probably play around with them before moving on to create your own
widgets.
The term_width parameter represents the current terminal width. If the
parameter is set to an integer then the progress bar will use that,
otherwise it will attempt to determine the terminal width falling back to
80 columns if the width cannot be determined.
When implementing a widget's update method you are passed a reference to
the current progress bar. As a result, you have access to the
ProgressBar's methods and attributes. Although there is nothing preventing
you from changing the ProgressBar you should treat it as read only.
Useful methods and attributes include (Public API):
- currval: current progress (0 <= currval <= maxval)
- maxval: maximum (and final) value
- finished: True if the bar has finished (reached 100%)
- start_time: the time when start() method of ProgressBar was called
- seconds_elapsed: seconds elapsed since start_time and last call to
update
- percentage(): progress in percent [0..100]
"""
__slots__ = ('currval', 'fd', 'finished', 'last_update_time',
'left_justify', 'maxval', 'next_update', 'num_intervals',
'poll', 'seconds_elapsed', 'signal_set', 'start_time',
'term_width', 'update_interval', 'widgets', '_time_sensitive',
'__iterable')
_DEFAULT_MAXVAL = 100
_DEFAULT_TERMSIZE = 80
_DEFAULT_WIDGETS = [widgets.Percentage(), ' ', widgets.Bar()]
def __init__(self, maxval=None, widgets=None, term_width=None, poll=1,
left_justify=True, fd=sys.stderr):
"""Initializes a progress bar with sane defaults."""
# Don't share a reference with any other progress bars
if widgets is None:
widgets = list(self._DEFAULT_WIDGETS)
self.maxval = maxval
self.widgets = widgets
self.fd = fd
self.left_justify = left_justify
self.signal_set = False
if term_width is not None:
self.term_width = term_width
else:
try:
self._handle_resize()
signal.signal(signal.SIGWINCH, self._handle_resize)
self.signal_set = True
except (SystemExit, KeyboardInterrupt): raise
except:
self.term_width = self._env_size()
self.__iterable = None
self._update_widgets()
self.currval = 0
self.finished = False
self.last_update_time = None
self.poll = poll
self.seconds_elapsed = 0
self.start_time = None
self.update_interval = 1
def __call__(self, iterable):
"""Use a ProgressBar to iterate through an iterable."""
try:
self.maxval = len(iterable)
except:
if self.maxval is None:
self.maxval = UnknownLength
self.__iterable = iter(iterable)
return self
def __iter__(self):
return self
def __next__(self):
try:
value = next(self.__iterable)
if self.start_time is None: self.start()
else: self.update(self.currval + 1)
return value
except StopIteration:
self.finish()
raise
# Create an alias so that Python 2.x won't complain about not being
# an iterator.
next = __next__
def _env_size(self):
"""Tries to find the term_width from the environment."""
return int(os.environ.get('COLUMNS', self._DEFAULT_TERMSIZE)) - 1
def _handle_resize(self, signum=None, frame=None):
"""Tries to catch resize signals sent from the terminal."""
h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
self.term_width = w
def percentage(self):
"""Returns the progress as a percentage."""
return self.currval * 100.0 / self.maxval
percent = property(percentage)
def _format_widgets(self):
result = []
expanding = []
width = self.term_width
for index, widget in enumerate(self.widgets):
if isinstance(widget, widgets.WidgetHFill):
result.append(widget)
expanding.insert(0, index)
else:
widget = widgets.format_updatable(widget, self)
result.append(widget)
width -= len(widget)
count = len(expanding)
while count:
portion = max(int(math.ceil(width * 1. / count)), 0)
index = expanding.pop()
count -= 1
widget = result[index].update(self, portion)
width -= len(widget)
result[index] = widget
return result
def _format_line(self):
"""Joins the widgets and justifies the line."""
widgets = ''.join(self._format_widgets())
if self.left_justify: return widgets.ljust(self.term_width)
else: return widgets.rjust(self.term_width)
def _need_update(self):
"""Returns whether the ProgressBar should redraw the line."""
if self.currval >= self.next_update or self.finished: return True
delta = time.time() - self.last_update_time
return self._time_sensitive and delta > self.poll
def _update_widgets(self):
"""Checks all widgets for the time sensitive bit."""
self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
for w in self.widgets)
def update(self, value=None):
"""Updates the ProgressBar to a new value."""
if value is not None and value is not UnknownLength:
if (self.maxval is not UnknownLength
and not 0 <= value <= self.maxval):
raise ValueError('Value out of range')
self.currval = value
if not self._need_update(): return
if self.start_time is None:
raise RuntimeError('You must call "start" before calling "update"')
now = time.time()
self.seconds_elapsed = now - self.start_time
self.next_update = self.currval + self.update_interval
self.fd.write(self._format_line() + '\r')
self.last_update_time = now
def start(self):
"""Starts measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
"""
if self.maxval is None:
self.maxval = self._DEFAULT_MAXVAL
self.num_intervals = max(100, self.term_width)
self.next_update = 0
if self.maxval is not UnknownLength:
if self.maxval < 0: raise ValueError('Value out of range')
self.update_interval = self.maxval / self.num_intervals
self.start_time = self.last_update_time = time.time()
self.update(0)
return self
def finish(self):
"""Puts the ProgressBar bar in the finished state."""
self.finished = True
self.update(self.maxval)
self.fd.write('\n')
if self.signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
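# A minimal usage sketch. It relies only on names already used in this
# module (ProgressBar plus widgets.Percentage and widgets.Bar); the loop
# body and sleep interval are illustrative assumptions.
def _example_usage():
    import time
    pbar = ProgressBar(maxval=50,
                       widgets=[widgets.Percentage(), ' ', widgets.Bar()])
    pbar.start()
    for i in range(50):
        time.sleep(0.01)      # stand-in for real work
        pbar.update(i + 1)
    pbar.finish()
    # The same class also wraps iterables directly via __call__/__iter__:
    for _ in ProgressBar()(range(50)):
        time.sleep(0.01)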
|
mit
| -1,432,068,487,588,659,200 | -174,024,495,820,889,900 | 30.047458 | 79 | 0.605634 | false |
mou4e/zirconium
|
chrome/common/extensions/docs/server2/samples_data_source.py
|
16
|
1167
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import traceback
from data_source import DataSource
from extensions_paths import EXAMPLES
from future import All, Future
from jsc_view import CreateSamplesView
from platform_util import GetPlatforms
class SamplesDataSource(DataSource):
'''Constructs a list of samples and their respective files and api calls.
'''
def __init__(self, server_instance, request):
self._platform_bundle = server_instance.platform_bundle
self._request = request
def _GetImpl(self, platform):
cache = self._platform_bundle.GetSamplesModel(platform).GetCache()
create_view = lambda samp_list: CreateSamplesView(samp_list, self._request)
return cache.GetFromFileListing('' if platform == 'apps'
else EXAMPLES).Then(create_view)
def get(self, platform):
return self._GetImpl(platform).Get()
def GetRefreshPaths(self):
return [platform for platform in GetPlatforms()]
def Refresh(self, path):
return self._GetImpl(path)
|
bsd-3-clause
| -5,163,958,180,425,669,000 | -3,567,403,182,326,281,000 | 32.342857 | 79 | 0.725793 | false |
neubot/neubot
|
neubot/system_posix.py
|
2
|
3747
|
# neubot/system_posix.py
#
# Copyright (c) 2010-2011
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN)
# and Simone Basso <[email protected]>
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
'''
Code for UNIX
'''
# NB: This code is currently being refactored.
#
# When we MUST exit it is better to use os._exit() rather than
# sys.exit() because the former cannot be caught while
# the latter can.
#
UNPRIV_USER = '_neubot'
import os
import syslog
from neubot import utils_hier
from neubot import utils_posix
from neubot import utils_rc
def __logger(severity, message):
''' Log @message at the given @severity using syslog '''
#
    # Implemented using syslog because SysLogHandler is
# difficult to use: you need to know the path to the
# system specific ``/dev/log``.
#
if severity == 'ERROR':
syslog.syslog(syslog.LOG_ERR, message)
elif severity == 'WARNING':
syslog.syslog(syslog.LOG_WARNING, message)
elif severity == 'DEBUG':
syslog.syslog(syslog.LOG_DEBUG, message)
else:
syslog.syslog(syslog.LOG_INFO, message)
def get_background_logger():
''' Return the background logger '''
syslog.openlog("neubot", syslog.LOG_PID, syslog.LOG_DAEMON)
return __logger
def _get_profile_dir():
''' The profile directory is always LOCALSTATEDIR '''
return utils_hier.LOCALSTATEDIR
def _want_rwx_dir(datadir):
'''
This function ensures that the unprivileged user is the
    owner of the directory that contains the Neubot database.
Otherwise sqlite3 fails to lock the database for writing
(it creates a lockfile for that).
Read more at http://www.neubot.org/node/14
'''
# Does the directory exist?
if not os.path.isdir(datadir):
os.mkdir(datadir, 493) # 0755 in base 10
# Change directory ownership
if os.getuid() == 0:
passwd = getpwnam()
os.chown(datadir, passwd.pw_uid, passwd.pw_gid)
def go_background():
''' Detach from the shell and run in background '''
utils_posix.daemonize(pidfile='/var/run/neubot.pid')
def getpwnam():
''' Wrapper for getpwnam '''
cnf = utils_rc.parse_safe('/etc/neubot/users')
unpriv_user = cnf.get('unpriv_user', UNPRIV_USER)
passwd = utils_posix.getpwnam(unpriv_user)
return passwd
def drop_privileges():
'''
Drop root privileges and run on behalf of the specified
    unprivileged user.
'''
passwd = getpwnam()
utils_posix.chuser(passwd)
def _want_rw_file(path):
'''
Ensure that the given file is readable and writable
    by its owner. If running as root, force ownership
    to the unprivileged user.
'''
# Create file if non-existent
filep = open(path, "ab+")
filep.close()
# Enforce file ownership
if os.getuid() == 0:
passwd = getpwnam()
os.chown(path, passwd.pw_uid, passwd.pw_gid)
# Set permissions
os.chmod(path, 420) # 0644 in base 10
def has_enough_privs():
''' Returns true if this process has enough privileges '''
return os.getuid() == 0
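# A minimal sketch of how these helpers are typically combined at daemon
# startup; the ordering (privilege check, detach, drop privileges) is an
# illustrative assumption and is not enforced by this module.
def _example_startup():
    logger = get_background_logger()
    if not has_enough_privs():
        logger('WARNING', 'not running as root: cannot drop privileges')
    go_background()
    drop_privileges()
    logger('INFO', 'neubot daemon started')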
|
gpl-3.0
| 7,062,018,822,715,837,000 | 4,740,404,377,481,594,000 | 26.962687 | 71 | 0.671471 | false |
carlodri/moviepy
|
moviepy/video/io/VideoFileClip.py
|
14
|
2711
|
import os
from moviepy.video.VideoClip import VideoClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.Clip import Clip
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
class VideoFileClip(VideoClip):
"""
A video clip originating from a movie file. For instance: ::
    >>> clip = VideoFileClip("myHolidays.mp4")
    >>> clip2 = VideoFileClip("myMaskVideo.avi")
Parameters
------------
filename:
The name of the video file. It can have any extension supported
by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.
has_mask:
Set this to 'True' if there is a mask included in the videofile.
Video files rarely contain masks, but some video codecs enable
      that. For instance, if you have a MoviePy VideoClip with a mask you
can save it to a videofile with a mask. (see also
``VideoClip.write_videofile`` for more details).
audio:
Set to `False` if the clip doesn't have any audio or if you do not
wish to read the audio.
Attributes
-----------
filename:
Name of the original video file.
fps:
Frames per second in the original file.
"""
def __init__(self, filename, has_mask=False,
audio=True, audio_buffersize = 200000,
audio_fps=44100, audio_nbytes=2, verbose=False):
VideoClip.__init__(self)
# Make a reader
pix_fmt= "rgba" if has_mask else "rgb24"
reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
self.reader = reader
# Make some of the reader's attributes accessible from the clip
self.duration = self.reader.duration
self.end = self.reader.duration
self.fps = self.reader.fps
self.size = self.reader.size
if has_mask:
self.make_frame = lambda t: reader.get_frame(t)[:,:,:3]
mask_mf = lambda t: reader.get_frame(t)[:,:,3]/255.0
self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
.set_duration(self.duration))
self.mask.fps = self.fps
else:
self.make_frame = lambda t: reader.get_frame(t)
# Make a reader for the audio, if any.
if audio and self.reader.infos['audio_found']:
self.audio = AudioFileClip(filename,
buffersize= audio_buffersize,
fps = audio_fps,
nbytes = audio_nbytes)
def __del__(self):
""" Close/delete the internal reader. """
del self.reader
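# A short usage sketch; the filename is a placeholder and the attribute
# reads simply mirror the values assigned in __init__ above.
def _example_usage():
    clip = VideoFileClip("myHolidays.mp4", audio=True)
    fps, duration, size = clip.fps, clip.duration, clip.size
    frame = clip.get_frame(1.0)   # RGB frame (numpy array) at t = 1 second
    del clip                      # triggers __del__ and closes the reader
    return fps, duration, size, frame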
|
mit
| 4,771,191,718,655,088,000 | 960,801,712,964,974,500 | 30.523256 | 72 | 0.571376 | false |
mithron/opendatahack
|
web/main.py
|
1
|
1805
|
from datetime import datetime
import json
import os
from urlparse import urlparse
from pymongo.connection import Connection
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
MONGO_URL = "" # found with $>heroku config
we_live = True
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/list/", MainHandler),
(r"/([0-9]+)/", SchoolHandler)
]
settings = dict(
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
if we_live:
self.con = Connection(MONGO_URL)
self.database = self.con[urlparse(MONGO_URL).path[1:]]
else:
self.con = Connection('localhost', 27017)
self.database = self.con["moscow"]
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.database
class SchoolHandler(BaseHandler):
def get(self, inn=None):
if inn:
suppliers = list(self.db["suppliers"].find({'inn': int(inn)}, fields={"_id": False}))
self.write(json.dumps(suppliers, ensure_ascii=False, encoding='utf8'))
else:
self.write("[]")
class MainHandler(BaseHandler):
def get(self):
schools = list(self.db["suppliers"].find(fields={"full_name": True, "inn": True, "_id": False}))
self.write(json.dumps(schools, ensure_ascii=False, encoding='utf8'))
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(int(os.environ.get("PORT", 8888)))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
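# A hedged client-side sketch (not part of the service itself): fetches the
# school list and one supplier detail from a locally running instance. The
# port matches the default above; the sample INN is made up.
def _example_client(base='http://localhost:8888'):
    import json
    import urllib2
    schools = json.loads(urllib2.urlopen(base + '/list/').read())
    detail = json.loads(urllib2.urlopen(base + '/1234567890/').read())
    return schools, detail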
|
mit
| 4,029,121,481,845,943,300 | -7,927,634,259,526,878,000 | 26.363636 | 104 | 0.628255 | false |
jnerin/ansible
|
lib/ansible/modules/source_control/gitlab_project.py
|
16
|
14955
|
#!/usr/bin/python
# (c) 2015, Werner Dijkerman ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_project
short_description: Creates/updates/deletes Gitlab Projects
description:
   - When the project does not exist in Gitlab, it will be created.
   - When the project does exist and state=absent, the project will be deleted.
- When changes are made to the project, the project will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
      - URL of the Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
      - Whether the SSL certificate should be verified when using https.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
group:
description:
      - The name of the group to which this project belongs.
      - When not provided, the project will belong to the user configured in 'login_user' or 'login_token'
      - When provided with a username, the project will be created for this user. 'login_user' or 'login_token' needs admin rights.
required: false
default: null
name:
description:
- The name of the project
required: true
path:
description:
      - The path of the project you want to create; this will be server_url/<group>/path
- If not supplied, name will be used.
required: false
default: null
description:
description:
      - A description for the project.
required: false
default: null
issues_enabled:
description:
- Whether you want to create issues or not.
- Possible values are true and false.
required: false
default: true
merge_requests_enabled:
description:
- If merge requests can be made or not.
- Possible values are true and false.
required: false
default: true
wiki_enabled:
description:
      - If a wiki for this project should be available or not.
- Possible values are true and false.
required: false
default: true
snippets_enabled:
description:
- If creating snippets should be available or not.
- Possible values are true and false.
required: false
default: true
public:
description:
      - If the project is publicly available or not.
- Setting this to true is same as setting visibility_level to 20.
- Possible values are true and false.
required: false
default: false
visibility_level:
description:
- Private. visibility_level is 0. Project access must be granted explicitly for each user.
- Internal. visibility_level is 10. The project can be cloned by any logged in user.
- Public. visibility_level is 20. The project can be cloned without any authentication.
- Possible values are 0, 10 and 20.
required: false
default: 0
import_url:
description:
- Git repository which will be imported into gitlab.
- Gitlab server needs read access to this git repository.
required: false
default: false
state:
description:
- create or delete project.
- Possible values are present and absent.
required: false
default: "present"
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab Project
gitlab_project:
server_url: http://gitlab.example.com
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
name: my_first_project
state: absent
delegate_to: localhost
- name: Create Gitlab Project in group Ansible
gitlab_project:
server_url: https://gitlab.example.com
validate_certs: True
login_user: dj-wasabi
login_password: MySecretPassword
name: my_first_project
group: ansible
issues_enabled: False
wiki_enabled: True
snippets_enabled: True
import_url: http://git.example.com/example/lab.git
state: present
delegate_to: localhost
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class GitLabProject(object):
def __init__(self, module, git):
self._module = module
self._gitlab = git
def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
is_user = False
group_id = self.getGroupId(group_name)
if not group_id:
group_id = self.getUserId(group_name)
is_user = True
if project_exists:
# Edit project
return self.updateProject(group_name, arguments)
else:
# Create project
if self._module.check_mode:
self._module.exit_json(changed=True)
return self.createProject(is_user, group_id, import_url, arguments)
def createProject(self, is_user, user_id, import_url, arguments):
if is_user:
return self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
else:
group_id = user_id
return self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)
def deleteProject(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return self._gitlab.deleteproject(result['id'])
def existsProject(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return True
return False
def existsGroup(self, group_name):
if group_name is not None:
            # Find the group; if the group does not exist, try a user
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return True
user_name = group_name
user_data = self._gitlab.getusers(search=user_name)
for data in user_data:
                if 'id' in data:
return True
return False
def getGroupId(self, group_name):
if group_name is not None:
            # Find the group; if the group does not exist, try a user
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return group['id']
def getProjectId(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return result['id']
def getUserId(self, user_name):
user_data = self._gitlab.getusers(search=user_name)
for data in user_data:
if 'id' in data:
return data['id']
return self._gitlab.currentuser()['id']
def to_bool(self, value):
if value:
return 1
else:
return 0
def updateProject(self, group_name, arguments):
project_changed = False
project_name = arguments['name']
project_id = self.getProjectId(group_name, project_name)
project_data = self._gitlab.getproject(project_id=project_id)
for arg_key, arg_value in arguments.items():
project_data_value = project_data[arg_key]
if isinstance(project_data_value, bool) or project_data_value is None:
to_bool = self.to_bool(project_data_value)
if to_bool != arg_value:
project_changed = True
continue
else:
if project_data_value != arg_value:
project_changed = True
if project_changed:
if self._module.check_mode:
self._module.exit_json(changed=True)
return self._gitlab.editproject(project_id=project_id, **arguments)
else:
return False
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True),
validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
login_user=dict(required=False, no_log=True),
login_password=dict(required=False, no_log=True),
login_token=dict(required=False, no_log=True),
group=dict(required=False),
name=dict(required=True),
path=dict(required=False),
description=dict(required=False),
issues_enabled=dict(default=True, type='bool'),
merge_requests_enabled=dict(default=True, type='bool'),
wiki_enabled=dict(default=True, type='bool'),
snippets_enabled=dict(default=True, type='bool'),
public=dict(default=False, type='bool'),
visibility_level=dict(default="0", choices=["0", "10", "20"]),
import_url=dict(required=False),
state=dict(default="present", choices=["present", 'absent']),
),
supports_check_mode=True
)
if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")
server_url = module.params['server_url']
verify_ssl = module.params['validate_certs']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_token = module.params['login_token']
group_name = module.params['group']
project_name = module.params['name']
project_path = module.params['path']
description = module.params['description']
issues_enabled = module.params['issues_enabled']
merge_requests_enabled = module.params['merge_requests_enabled']
wiki_enabled = module.params['wiki_enabled']
snippets_enabled = module.params['snippets_enabled']
public = module.params['public']
visibility_level = module.params['visibility_level']
import_url = module.params['import_url']
state = module.params['state']
# We need both login_user and login_password or login_token, otherwise we fail.
if login_user is not None and login_password is not None:
use_credentials = True
elif login_token is not None:
use_credentials = False
else:
module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
# Set project_path to project_name if it is empty.
if project_path is None:
project_path = project_name.replace(" ", "_")
# Gitlab API makes no difference between upper and lower cases, so we lower them.
project_name = project_name.lower()
project_path = project_path.lower()
if group_name is not None:
group_name = group_name.lower()
    # Let's make a connection to the Gitlab server_url, with either login_user and login_password
# or with login_token
try:
if use_credentials:
git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
git.login(user=login_user, password=login_password)
else:
git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
except Exception as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s " % to_native(e))
# Check if user is authorized or not before proceeding to any operations
# if not, exit from here
auth_msg = git.currentuser().get('message', None)
if auth_msg is not None and auth_msg == '401 Unauthorized':
module.fail_json(msg='User unauthorized',
details="User is not allowed to access Gitlab server "
"using login_token. Please check login_token")
# Validate if project exists and take action based on "state"
project = GitLabProject(module, git)
project_exists = project.existsProject(group_name, project_name)
# Creating the project dict
arguments = {"name": project_name,
"path": project_path,
"description": description,
"issues_enabled": project.to_bool(issues_enabled),
"merge_requests_enabled": project.to_bool(merge_requests_enabled),
"wiki_enabled": project.to_bool(wiki_enabled),
"snippets_enabled": project.to_bool(snippets_enabled),
"public": project.to_bool(public),
"visibility_level": int(visibility_level)}
if project_exists and state == "absent":
project.deleteProject(group_name, project_name)
module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
else:
if state == "absent":
module.exit_json(changed=False, result="Project deleted or does not exists")
else:
if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
gpl-3.0
| 8,654,169,460,846,227,000 | 8,073,050,170,267,556,000 | 36.017327 | 131 | 0.611234 | false |
dexterx17/nodoSocket
|
clients/Python-2.7.6/Lib/bsddb/test/test_early_close.py
|
72
|
7440
|
"""TestCases for checking that it does not segfault when a DBEnv object
is closed before its DB objects.
"""
import os, sys
import unittest
from test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
# We're going to get warnings in this module about trying to close the db when
# its env is already closed. Let's just ignore those.
try:
import warnings
except ImportError:
pass
else:
warnings.filterwarnings('ignore',
message='DB could not be closed in',
category=RuntimeWarning)
#----------------------------------------------------------------------
class DBEnvClosedEarlyCrash(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.filename = "test"
def tearDown(self):
test_support.rmtree(self.homeDir)
def test01_close_dbenv_before_db(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d2 = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
self.assertRaises(db.DBNoSuchFileError, d2.open,
self.filename+"2", db.DB_BTREE, db.DB_THREAD, 0666)
d.put("test","this is a test")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
dbenv.close() # This "close" should close the child db handle also
self.assertRaises(db.DBError, d.get, "test")
def test02_close_dbenv_before_dbcursor(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
c.next()
d.close() # This "close" should close the child db handle also
# db.close should close the child cursor
self.assertRaises(db.DBError,c.next)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
c=d.cursor()
c.first()
c.next()
dbenv.close()
# The "close" should close the child db handle also, with cursors
self.assertRaises(db.DBError, c.next)
def test03_close_db_before_dbcursor_without_env(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
c.next()
d.close()
# The "close" should close the child db handle also
self.assertRaises(db.DBError, c.next)
def test04_close_massive(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
dbs=[db.DB(dbenv) for i in xrange(16)]
cursors=[]
for i in dbs :
i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbs[10].put("test","this is a test")
dbs[10].put("test2","another test")
dbs[10].put("test3","another one")
self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")
for i in dbs :
cursors.extend([i.cursor() for j in xrange(32)])
for i in dbs[::3] :
i.close()
for i in cursors[::3] :
i.close()
# Check for missing exception in DB! (after DB close)
self.assertRaises(db.DBError, dbs[9].get, "test")
# Check for missing exception in DBCursor! (after DB close)
self.assertRaises(db.DBError, cursors[101].first)
cursors[80].first()
cursors[80].next()
dbenv.close() # This "close" should close the child db handle also
# Check for missing exception! (after DBEnv close)
self.assertRaises(db.DBError, cursors[80].next)
def test05_close_dbenv_delete_db_success(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbenv.close() # This "close" should close the child db handle also
del d
try:
import gc
except ImportError:
gc = None
if gc:
# force d.__del__ [DB_dealloc] to be called
gc.collect()
def test06_close_txn_before_dup_cursor(self) :
dbenv = db.DBEnv()
dbenv.open(self.homeDir,db.DB_INIT_TXN | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_CREATE)
d = db.DB(dbenv)
txn = dbenv.txn_begin()
d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE,
txn=txn)
d.put("XXX", "yyy", txn=txn)
txn.commit()
txn = dbenv.txn_begin()
c1 = d.cursor(txn)
c2 = c1.dup()
self.assertEqual(("XXX", "yyy"), c1.first())
# Not interested in warnings about implicit close.
import warnings
if sys.version_info < (2, 6) :
# Completely resetting the warning state is
# problematic with python >=2.6 with -3 (py3k warning),
# because some stdlib modules selectively ignores warnings.
warnings.simplefilter("ignore")
txn.commit()
warnings.resetwarnings()
else :
# When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# txn.commit()
#
# We can not use "with" as is, because it would be invalid syntax
# in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
w = warnings.catch_warnings()
w.__enter__()
try :
warnings.simplefilter("ignore")
txn.commit()
finally :
w.__exit__()
self.assertRaises(db.DBCursorClosedError, c2.first)
def test07_close_db_before_sequence(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbs=db.DBSequence(d)
d.close() # This "close" should close the child DBSequence also
dbs.close() # If not closed, core dump (in Berkeley DB 4.6.*)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
mit
| 1,471,670,200,872,444,700 | -4,185,867,921,146,321,000 | 33.604651 | 95 | 0.554839 | false |
cgmckeever/contests
|
2012-mebipenny/contest/reductio-ad-egyptium/solution.py
|
7
|
1843
|
import sys
import math
class Fraction:
def __init__(self, num, denom):
if (num > 0 and denom % num == 0):
# Reduce the fraction
denom /= num
num = 1
self.num = num
self.denom = denom
def subtract(self, other_num, other_denom):
common_denom = self.denom * other_denom
converted_num = self.num * common_denom / self.denom
converted_other_num = other_num * common_denom / other_denom
return Fraction(converted_num - converted_other_num, common_denom)
def largest_contained_egyptian(self):
if self.num == 0:
return Fraction(0, self.denom)
if self.num == 1:
return Fraction(1, self.denom)
next_denom = int(math.ceil((0.0 + self.denom) / self.num))
next_fraction = Fraction(1, next_denom)
return next_fraction
def __str__(self):
return "%d/%d" % (self.num, self.denom)
def main(num, denom):
goal = Fraction(num, denom)
curr_denom = goal.largest_contained_egyptian().denom
final_denoms = []
while goal.num != 0:
remainder = goal.subtract(1, curr_denom)
if remainder.num >= 0:
final_denoms.append(curr_denom)
goal = remainder
if False:
# simple version
curr_denom += 1;
else:
# advanced version: intelligently jump to the next available denominator
next_fraction = goal.largest_contained_egyptian()
curr_denom = next_fraction.denom
if goal.subtract(next_fraction.num, next_fraction.denom).num < 0:
print "*** rounding error ***"
final_denoms.append(0)
goal.num = 0
components = ["%d" % x for x in final_denoms]
print "%s" % ' '.join(components)
if __name__ == "__main__":
while True:
data = sys.stdin.readline()
if not data:
break
n, d = data.split(' ')
n = int(n)
d = int(d)
main(n, d)
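# Worked example of the greedy expansion (illustrative only, reusing the
# Fraction class above): expanding 3/7 picks 1/3, then 1/11, then 1/231,
# so the program would print "3 11 231 0" (the trailing 0 is the terminator
# appended by main).
def _example_expansion():
    goal = Fraction(3, 7)
    denoms = []
    while goal.num != 0:
        step = goal.largest_contained_egyptian()
        denoms.append(step.denom)
        goal = goal.subtract(step.num, step.denom)
    return denoms   # [3, 11, 231]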
|
mit
| 3,694,100,035,508,590,000 | -3,103,952,117,226,333,700 | 24.246575 | 78 | 0.603907 | false |
aptomar/apt-file-format
|
test/testAptofile.py
|
1
|
23249
|
################################################################
# #
# testAptofile.py #
# Copyright (c) 2013 Aptomar AS, All Rights Reserved #
# #
# Author: Jarle Bauck Hamar: <[email protected]> #
# Date: 2013-05-23 #
# #
################################################################
import unittest
import sys
import json
sys.path.append('../src')
from aptofile import Aptofile
import jsonschema
class TestManifest(unittest.TestCase):
def setUp(self):
with open('tests/header.json') as fid:
self.inst = json.load(fid)
self.schema = Aptofile.SCHEMA
def validate(self):
try:
jsonschema.validate(self.inst, self.schema, Aptofile.VALIDATOR,
format_checker = jsonschema.FormatChecker())
except jsonschema.ValidationError:
return False
return True
def test_schema_validates(self):
Aptofile.VALIDATOR.check_schema(Aptofile.SCHEMA)
def test_valid_manifest_header(self):
self.assertTrue(self.validate())
def test_manifest_missing_date(self):
del self.inst["date"]
self.assertFalse(self.validate())
def test_manifest_missing_description(self):
del self.inst["description"]
self.assertFalse(self.validate())
def test_manifest_missing_version(self):
del self.inst["manifest_version"]
self.assertFalse(self.validate())
def test_manifest_missing_generator(self):
del self.inst["generator"]
self.assertFalse(self.validate())
def test_manifest_bad_date(self):
self.inst["date"] = "tomorrow"
self.assertFalse(self.validate())
def test_manifest_disallow_additional_properties(self):
self.inst["extra"] = "large"
self.assertFalse(self.validate())
class TestAsset(unittest.TestCase):
def testCreateAsset(self):
f = 'tests/asset.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'file:/layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
#Validate after write and open
self.assertTrue(Aptofile.validateFile(f))
def testAssetMissingFile(self):
f = 'tests/asset_missing_file.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
af.addFile2Layer('resource3.png','layer2','resources', writeFile=False)
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectLayerInGroup(self):
f = 'tests/asset_incorrect_layer_in_group.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer3'])
#Validate before write:
self.assertFalse(af.validate())
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetMissingStyle(self):
f = 'tests/asset_missing_style.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
del af.manifest['asset']['layers']['layer1']['style']
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectDataType(self):
f = 'tests/asset_incorrect_data_type.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
d=af.manifest['asset']['layers']['layer1']['style']['data'].pop()
af.manifest['asset']['layers']['layer1']['style']['data'] = d
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
class TestImage(unittest.TestCase):
def testImage(self):
f = 'tests/image.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testImageMissingDate(self):
f = 'tests/image_missing_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['image']['created']
self.assertFalse(Aptofile.validateFile(f))
def testImageIncorrectDate(self):
f = 'tests/image_missing_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
af.manifest['image']['created'] = '23.05.13'
af.validate()
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingFileAndGenerator(self):
f = 'tests/image_missing_file_and_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.manifest['image']['data']=['image.jpg']
del af.manifest['generator']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingGenerator(self):
f = 'tests/image_missing_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['generator']
self.assertFalse(Aptofile.validateFile(f))
class testVideo(unittest.TestCase):
def testVideo(self):
f = 'tests/video.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testVideoMissingFile(self):
f = 'tests/video_missing_file.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoFileNotFound(self):
f = 'tests/video_file_not_found.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.manifest['video']['data']=['video.avi']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoMissingName(self):
f = 'tests/video_missing_name.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
del af.manifest['video']['name']
self.assertFalse(Aptofile.validateFile(f))
class TestPoint(unittest.TestCase):
def testPoint(self):
f = 'tests/point.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testPointInvalidType(self):
f = 'tests/point_invalid_type.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
af.manifest['point']['object-type'] = 'UFO'
self.assertFalse(Aptofile.validateFile(f))
def testRoute(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testRouteMissingGeometry(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
del af.manifest['route']['geometry']
self.assertFalse(Aptofile.validateFile(f))
class TestArea(unittest.TestCase):
def testArea(self):
f = 'tests/area.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
af.setAreaName('The Point')
af.setAreaDescription('This is a description of the area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testAreaMissingAreaDescription(self):
f = 'tests/area_missing_area_desc.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
af.setAreaName('The Point')
af.setAreaDescription('This is a description of a area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
del af.manifest['area']['description']
self.assertFalse(Aptofile.validateFile(f))
if __name__=='__main__':
unittest.main()
|
bsd-3-clause
| -2,989,174,167,242,607,600 | -255,049,936,691,212,500 | 46.543967 | 94 | 0.555422 | false |
uclouvain/osis
|
base/migrations/0062_add_uuid_field.py
|
2
|
2432
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-05 12:48
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0061_auto_20160902_1639'),
]
operations = [
migrations.RemoveField(
model_name='domainoffer',
name='domain',
),
migrations.RemoveField(
model_name='domainoffer',
name='offer_year',
),
migrations.AddField(
model_name='academicyear',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='campus',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offer',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offerenrollment',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offeryear',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offeryeardomain',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='organization',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='person',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='student',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='tutor',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.DeleteModel(
name='DomainOffer',
),
]
|
agpl-3.0
| -1,653,940,323,621,866,200 | 2,747,153,206,937,140,700 | 30.179487 | 81 | 0.546875 | false |
ojengwa/grr
|
lib/rdfvalues/checks.py
|
2
|
12558
|
#!/usr/bin/env python
"""Implementation of check types."""
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.lib.checks import filters
from grr.lib.checks import hints
from grr.lib.checks import triggers
from grr.lib.rdfvalues import structs
from grr.proto import checks_pb2
def ValidateMultiple(component, hint):
errors = []
for item in component:
try:
item.Validate()
except (checks.DefinitionError) as e:
errors.append(str(e))
if errors:
raise checks.DefinitionError("%s:\n %s" % (hint, "\n ".join(errors)))
def MatchStrToList(match=None):
# Set a default match type of ANY, if unset.
# Allow multiple match types, either as a list or as a string.
if match is None:
match = ["ANY"]
elif isinstance(match, basestring):
match = match.split()
return match
class CheckResult(structs.RDFProtoStruct):
"""Results of a single check performed on a host."""
protobuf = checks_pb2.CheckResult
def __nonzero__(self):
return bool(self.anomaly)
def ExtendAnomalies(self, other):
"""Merge anomalies from another CheckResult."""
for o in other:
if o is not None:
self.anomaly.Extend(list(o.anomaly))
class CheckResults(structs.RDFProtoStruct):
"""All results for a single host."""
protobuf = checks_pb2.CheckResults
def __nonzero__(self):
return bool(self.result)
class Target(structs.RDFProtoStruct):
"""Definitions of hosts to target."""
protobuf = checks_pb2.Target
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Target, self).__init__(initializer=initializer, age=age, **conf)
def __nonzero__(self):
return any([self.cpe, self.os, self.label])
def Validate(self):
if self.cpe:
# TODO(user): Add CPE library to GRR.
pass
if self.os:
pass
if self.label:
pass
class Check(structs.RDFProtoStruct):
"""A definition of a problem, and ways to detect it.
Checks contain an identifier of a problem (check_id) that is a reference to an
externally or internally defined vulnerability.
Checks use one or more Methods to determine if an issue exists. Methods define
data collection and processing, and return an Anomaly if the conditions tested
by the method weren't met.
Checks can define a default platform, OS or environment to target. This
is passed to each Method, but can be overridden by more specific definitions.
"""
protobuf = checks_pb2.Check
def __init__(self, initializer=None, age=None, check_id=None, target=None,
match=None, method=None, hint=None):
super(Check, self).__init__(initializer=initializer, age=age)
self.check_id = check_id
self.match = MatchStrToList(match)
self.hint = Hint(hint, reformat=False)
self.target = target
if method is None:
method = []
self.triggers = triggers.Triggers()
self.matcher = checks.Matcher(self.match, self.hint)
for cfg in method:
# Use the value of "target" as a default for each method, if defined.
# Targets defined in methods or probes override this default value.
if hint:
cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
if target:
cfg.setdefault("target", target)
# Create the method and add its triggers to the check.
m = Method(**cfg)
self.method.append(m)
self.triggers.Update(m.triggers, callback=m)
self.artifacts = set([t.artifact for t in self.triggers.conditions])
def SelectChecks(self, conditions):
"""Identifies which check methods to use based on host attributes.
Queries the trigger map for any check methods that apply to a combination of
OS, CPE and/or label.
Args:
conditions: A list of Condition objects.
Returns:
A list of method callbacks that should perform checks.
"""
return self.triggers.Calls(conditions)
def UsesArtifact(self, artifacts):
"""Determines if the check uses the specified artifact.
Args:
artifacts: Either a single artifact name, or a list of artifact names
Returns:
True if the check uses a specific artifact.
"""
# If artifact is a single string, see if it is in the list of artifacts
# as-is. Otherwise, test whether any of the artifacts passed in to this
# function exist in the list of artifacts.
if isinstance(artifacts, basestring):
return artifacts in self.artifacts
else:
return any(True for artifact in artifacts if artifact in self.artifacts)
def Parse(self, conditions, host_data):
"""Runs methods that evaluate whether collected host_data has an issue.
Args:
conditions: A list of conditions to determine which Methods to trigger.
host_data: A map of artifacts and rdf data.
Returns:
A CheckResult populated with Anomalies if an issue exists.
"""
result = CheckResult(check_id=self.check_id)
methods = self.SelectChecks(conditions)
result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
return result
def Validate(self):
"""Check the method is well constructed."""
if not self.check_id:
raise checks.DefinitionError("Check has missing check_id value")
cls_name = self.check_id
if not self.method:
raise checks.DefinitionError("Check %s has no methods" % cls_name)
ValidateMultiple(self.method,
"Check %s has invalid method definitions" % cls_name)
class Method(structs.RDFProtoStruct):
"""A specific test method using 0 or more filters to process data."""
protobuf = checks_pb2.Method
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Method, self).__init__(initializer=initializer, age=age)
probe = conf.get("probe", {})
resource = conf.get("resource", {})
hint = conf.get("hint", {})
target = conf.get("target", {})
if hint:
# Add the hint to children.
for cfg in probe:
cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
self.probe = [Probe(**cfg) for cfg in probe]
self.hint = Hint(hint, reformat=False)
self.match = MatchStrToList(kwargs.get("match"))
self.matcher = checks.Matcher(self.match, self.hint)
self.resource = [rdfvalue.Dict(**r) for r in resource]
self.target = Target(**target)
self.triggers = triggers.Triggers()
for p in self.probe:
# If the probe has a target, use it. Otherwise, use the method's target.
target = p.target or self.target
self.triggers.Add(p.artifact, target, p)
def Parse(self, conditions, host_data):
"""Runs probes that evaluate whether collected data has an issue.
Args:
conditions: The trigger conditions.
host_data: A map of artifacts and rdf data.
Returns:
Anomalies if an issue exists.
"""
processed = []
probes = self.triggers.Calls(conditions)
for p in probes:
# TODO(user): Need to use the (artifact, rdf_data tuple).
# Get the data required for the probe.
rdf_data = host_data.get(p.artifact)
result = p.Parse(rdf_data)
if result:
processed.append(result)
# Matcher compares the number of probes that triggered with results.
return self.matcher.Detect(probes, processed)
def Validate(self):
"""Check the Method is well constructed."""
ValidateMultiple(self.probe, "Method has invalid probes")
ValidateMultiple(self.target, "Method has invalid target")
ValidateMultiple(self.hint, "Method has invalid hint")
class Probe(structs.RDFProtoStruct):
"""The suite of filters applied to host data."""
protobuf = checks_pb2.Probe
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
conf["match"] = MatchStrToList(kwargs.get("match"))
super(Probe, self).__init__(initializer=initializer, age=age, **conf)
if self.filters:
handler = filters.GetHandler(mode=self.mode)
else:
handler = filters.GetHandler()
self.baseliner = handler(artifact=self.artifact, filters=self.baseline)
self.handler = handler(artifact=self.artifact, filters=self.filters)
hinter = Hint(conf.get("hint", {}), reformat=False)
self.matcher = checks.Matcher(conf["match"], hinter)
def Parse(self, rdf_data):
"""Process rdf data through filters. Test if results match expectations.
Processing of rdf data is staged by a filter handler, which manages the
processing of host data. The output of the filters are compared against
expected results.
Args:
rdf_data: An iterable containing 0 or more rdf values.
Returns:
An anomaly if data didn't match expectations.
"""
# TODO(user): Make sure that the filters are called on collected data.
if self.baseline:
comparison = self.baseliner.Parse(rdf_data)
else:
comparison = rdf_data
found = self.handler.Parse(comparison)
results = self.hint.Render(found)
return self.matcher.Detect(comparison, results)
def Validate(self):
"""Check the test set is well constructed."""
ValidateMultiple(self.target, "Probe has invalid target")
self.baseliner.Validate()
self.handler.Validate()
self.hint.Validate()
class Filter(structs.RDFProtoStruct):
"""Generic filter to provide an interface for different types of filter."""
protobuf = checks_pb2.Filter
def __init__(self, initializer=None, age=None, **kwargs):
# FIXME(sebastianw): Probe seems to pass in the configuration for filters
# as a dict in initializer, rather than as kwargs.
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Filter, self).__init__(initializer=initializer, age=age, **conf)
filter_name = self.type or "Filter"
self._filter = filters.Filter.GetFilter(filter_name)
def Parse(self, rdf_data):
"""Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.
If no filter method is provided, the data is returned as a list.
Otherwise, a list of parsed data items are returned.
Args:
rdf_data: Host data that has already been processed by a Parser into RDF.
Returns:
A list of data items that matched the filter rules.
"""
if not self._filter:
if isinstance(rdf_data, basestring):
return [rdf_data]
return list(rdf_data)
# TODO(user): filters need to return data as a list if no expression
# is provided.
return [x for x in self._filter.Parse(rdf_data, self.expression)]
def Validate(self):
"""The filter exists, and has valid filter and hint expressions."""
if self.type not in filters.Filter.classes:
raise checks.DefinitionError("Undefined filter type %s" % self.type)
self._filter.Validate(self.expression)
ValidateMultiple(self.hint, "Filter has invalid hint")
class Hint(structs.RDFProtoStruct):
"""Human-formatted descriptions of problems, fixes and findings."""
protobuf = checks_pb2.Hint
def __init__(self, initializer=None, age=None, reformat=True, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Hint, self).__init__(initializer=initializer, age=age, **conf)
if not self.max_results:
self.max_results = config_lib.CONFIG.Get("Checks.max_results")
if reformat:
self.hinter = hints.Hinter(self.format)
else:
self.hinter = hints.Hinter()
def Render(self, rdf_data):
"""Processes data according to formatting rules."""
report_data = rdf_data[:self.max_results]
results = [self.hinter.Render(rdf) for rdf in report_data]
extra = len(rdf_data) - len(report_data)
if extra > 0:
results.append("...plus another %d issues." % extra)
return results
def Explanation(self, state):
"""Creates an anomaly explanation string."""
if self.problem:
return "%s: %s" % (state, self.problem)
def Validate(self):
"""Ensures that required values are set and formatting rules compile."""
# TODO(user): Default format string.
if self.problem:
pass
|
apache-2.0
| -1,698,354,144,586,011,000 | 1,552,945,770,072,696,300 | 32.398936 | 80 | 0.679328 | false |
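A minimal sketch of how the check definitions above are consumed, built from an assumed configuration dict. The field names mirror the constructor arguments visible in this module (check_id, hint, target, method, probe, match, filters), but the authoritative schema lives in checks_pb2 and may differ.

# Hypothetical check configuration; every value here is illustrative only.
config = {
    "check_id": "SSH-PROTOCOL-V1",
    "hint": {"problem": "SSH protocol 1 is enabled."},
    "method": [{
        "target": {"os": ["Linux"]},
        "match": ["ANY"],
        "probe": [{
            "artifact": "SshdConfigFile",
            "filters": [{"type": "ObjectFilter",
                         "expression": "protocol contains 1"}],
        }],
    }],
}
# check = Check(**config)                      # builds methods, probes and triggers
# result = check.Parse(conditions, host_data)  # CheckResult carrying any anomalies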
CloudBoltSoftware/cloudbolt-forge
|
ui_extensions/veeam_admin_extension/restore_backup.py
|
1
|
1717
|
import requests
import time
from xml.dom import minidom
from common.methods import set_progress
from xui.veeam.veeam_admin import VeeamManager
def run(server, *args, **kwargs):
set_progress(f"Starting Veeam Backup restoration... ")
veeam = VeeamManager()
server_ci = veeam.get_connection_info()
url = f'http://{server_ci.ip}:9399/api/vmRestorePoints/' + \
kwargs.get('restore_point_href') + '?action=restore'
session_id = veeam.get_veeam_server_session_id()
header = {"X-RestSvcSessionId": session_id}
response = requests.post(url=url, headers=header)
task = minidom.parseString(response.content.decode('utf-8'))
items = task.getElementsByTagName('Task')[0].attributes.items()
restoration_url = [item for item in items if item[0] == 'Href'][0][-1]
def check_state():
response = requests.get(restoration_url, headers=header)
dom = minidom.parseString(response.content.decode('utf-8'))
state = dom.getElementsByTagName('State')[0]
child = state.firstChild
return child
# Wait until the restoration to completed.
while check_state().data == 'Running':
# wait
set_progress("Waiting for restoration to complete...")
time.sleep(10)
if check_state().data == 'Finished':
set_progress("Server restoration completed successfully")
return "SUCCESS", "Server restoration completed successfully", ""
else:
set_progress("Server restoration didn't complete successfully")
return "FAILURE", "", "Server restoration didn't complete successfully"
|
apache-2.0
| -8,074,987,566,808,631,000 | -648,489,074,371,338,100 | 38.022727 | 83 | 0.630169 | false |
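A self-contained sketch of the minidom handling the restore script above relies on: pulling the task's Href attribute and State element out of a response body. The XML string is invented for illustration; real Veeam REST responses carry more fields.

from xml.dom import minidom

sample = ('<Task Href="http://veeam.example:9399/api/tasks/task-1" Type="RestoreVm">'
          '<State>Running</State></Task>')
dom = minidom.parseString(sample)
attrs = dom.getElementsByTagName('Task')[0].attributes.items()
task_url = [value for name, value in attrs if name == 'Href'][0]
state = dom.getElementsByTagName('State')[0].firstChild.data
print(task_url)  # http://veeam.example:9399/api/tasks/task-1
print(state)     # Running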
sunlianqiang/kbengine
|
kbe/src/lib/python/Lib/test/test_pprint.py
|
72
|
30339
|
# -*- coding: utf-8 -*-
import pprint
import test.support
import unittest
import test.test_set
import random
import collections
import itertools
# list, tuple and dict subclasses that do or don't overwrite __repr__
class list2(list):
pass
class list3(list):
def __repr__(self):
return list.__repr__(self)
class tuple2(tuple):
pass
class tuple3(tuple):
def __repr__(self):
return tuple.__repr__(self)
class set2(set):
pass
class set3(set):
def __repr__(self):
return set.__repr__(self)
class frozenset2(frozenset):
pass
class frozenset3(frozenset):
def __repr__(self):
return frozenset.__repr__(self)
class dict2(dict):
pass
class dict3(dict):
def __repr__(self):
return dict.__repr__(self)
class Unorderable:
def __repr__(self):
return str(id(self))
class QueryTestCase(unittest.TestCase):
def setUp(self):
self.a = list(range(100))
self.b = list(range(200))
self.a[-12] = self.b
def test_basic(self):
# Verify .isrecursive() and .isreadable() w/o recursion
pp = pprint.PrettyPrinter()
for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, "yaddayadda",
self.a, self.b):
# module-level convenience functions
self.assertFalse(pprint.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pprint.isreadable(safe),
"expected isreadable for %r" % (safe,))
# PrettyPrinter methods
self.assertFalse(pp.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pp.isreadable(safe),
"expected isreadable for %r" % (safe,))
def test_knotted(self):
# Verify .isrecursive() and .isreadable() w/ recursion
# Tie a knot.
self.b[67] = self.a
# Messy dict.
self.d = {}
self.d[0] = self.d[1] = self.d[2] = self.d
pp = pprint.PrettyPrinter()
for icky in self.a, self.b, self.d, (self.d, self.d):
self.assertTrue(pprint.isrecursive(icky), "expected isrecursive")
self.assertFalse(pprint.isreadable(icky), "expected not isreadable")
self.assertTrue(pp.isrecursive(icky), "expected isrecursive")
self.assertFalse(pp.isreadable(icky), "expected not isreadable")
# Break the cycles.
self.d.clear()
del self.a[:]
del self.b[:]
for safe in self.a, self.b, self.d, (self.d, self.d):
# module-level convenience functions
self.assertFalse(pprint.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pprint.isreadable(safe),
"expected isreadable for %r" % (safe,))
# PrettyPrinter methods
self.assertFalse(pp.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pp.isreadable(safe),
"expected isreadable for %r" % (safe,))
def test_unreadable(self):
# Not recursive but not readable anyway
pp = pprint.PrettyPrinter()
for unreadable in type(3), pprint, pprint.isrecursive:
# module-level convenience functions
self.assertFalse(pprint.isrecursive(unreadable),
"expected not isrecursive for %r" % (unreadable,))
self.assertFalse(pprint.isreadable(unreadable),
"expected not isreadable for %r" % (unreadable,))
# PrettyPrinter methods
self.assertFalse(pp.isrecursive(unreadable),
"expected not isrecursive for %r" % (unreadable,))
self.assertFalse(pp.isreadable(unreadable),
"expected not isreadable for %r" % (unreadable,))
def test_same_as_repr(self):
# Simple objects, small containers and classes that overwrite __repr__
# For those the result should be the same as repr().
# Ahem. The docs don't say anything about that -- this appears to
# be testing an implementation quirk. Starting in Python 2.5, it's
# not true for dicts: pprint always sorts dicts by key now; before,
# it sorted a dict display if and only if the display required
# multiple lines. For that reason, dicts with more than one element
# aren't tested here.
for simple in (0, 0, 0+0j, 0.0, "", b"",
(), tuple2(), tuple3(),
[], list2(), list3(),
set(), set2(), set3(),
frozenset(), frozenset2(), frozenset3(),
{}, dict2(), dict3(),
self.assertTrue, pprint,
-6, -6, -6-6j, -1.5, "x", b"x", (3,), [3], {3: 6},
(1,2), [3,4], {5: 6},
tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
[3,4], list2([3,4]), list3([3,4]), list3(range(100)),
set({7}), set2({7}), set3({7}),
frozenset({8}), frozenset2({8}), frozenset3({8}),
dict2({5: 6}), dict3({5: 6}),
range(10, -11, -1)
):
native = repr(simple)
self.assertEqual(pprint.pformat(simple), native)
self.assertEqual(pprint.pformat(simple, width=1, indent=0)
.replace('\n', ' '), native)
self.assertEqual(pprint.saferepr(simple), native)
def test_basic_line_wrap(self):
# verify basic line-wrapping operation
o = {'RPM_cal': 0,
'RPM_cal2': 48059,
'Speed_cal': 0,
'controldesk_runtime_us': 0,
'main_code_runtime_us': 0,
'read_io_runtime_us': 0,
'write_io_runtime_us': 43690}
exp = """\
{'RPM_cal': 0,
'RPM_cal2': 48059,
'Speed_cal': 0,
'controldesk_runtime_us': 0,
'main_code_runtime_us': 0,
'read_io_runtime_us': 0,
'write_io_runtime_us': 43690}"""
for type in [dict, dict2]:
self.assertEqual(pprint.pformat(type(o)), exp)
o = range(100)
exp = '[%s]' % ',\n '.join(map(str, o))
for type in [list, list2]:
self.assertEqual(pprint.pformat(type(o)), exp)
o = tuple(range(100))
exp = '(%s)' % ',\n '.join(map(str, o))
for type in [tuple, tuple2]:
self.assertEqual(pprint.pformat(type(o)), exp)
# indent parameter
o = range(100)
exp = '[ %s]' % ',\n '.join(map(str, o))
for type in [list, list2]:
self.assertEqual(pprint.pformat(type(o), indent=4), exp)
def test_nested_indentations(self):
o1 = list(range(10))
o2 = dict(first=1, second=2, third=3)
o = [o1, o2]
expected = """\
[ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
{ 'first': 1,
'second': 2,
'third': 3}]"""
self.assertEqual(pprint.pformat(o, indent=4, width=42), expected)
def test_sorted_dict(self):
# Starting in Python 2.5, pprint sorts dict displays by key regardless
# of how small the dictionary may be.
# Before the change, on 32-bit Windows pformat() gave order
# 'a', 'c', 'b' here, so this test failed.
d = {'a': 1, 'b': 1, 'c': 1}
self.assertEqual(pprint.pformat(d), "{'a': 1, 'b': 1, 'c': 1}")
self.assertEqual(pprint.pformat([d, d]),
"[{'a': 1, 'b': 1, 'c': 1}, {'a': 1, 'b': 1, 'c': 1}]")
# The next one is kind of goofy. The sorted order depends on the
# alphabetic order of type names: "int" < "str" < "tuple". Before
# Python 2.5, this was in the test_same_as_repr() test. It's worth
# keeping around for now because it's one of few tests of pprint
# against a crazy mix of types.
self.assertEqual(pprint.pformat({"xy\tab\n": (3,), 5: [[]], (): {}}),
r"{5: [[]], 'xy\tab\n': (3,), (): {}}")
def test_ordered_dict(self):
words = 'the quick brown fox jumped over a lazy dog'.split()
d = collections.OrderedDict(zip(words, itertools.count()))
self.assertEqual(pprint.pformat(d),
"""\
{'the': 0,
'quick': 1,
'brown': 2,
'fox': 3,
'jumped': 4,
'over': 5,
'a': 6,
'lazy': 7,
'dog': 8}""")
def test_subclassing(self):
o = {'names with spaces': 'should be presented using repr()',
'others.should.not.be': 'like.this'}
exp = """\
{'names with spaces': 'should be presented using repr()',
others.should.not.be: like.this}"""
self.assertEqual(DottedPrettyPrinter().pformat(o), exp)
def test_set_reprs(self):
self.assertEqual(pprint.pformat(set()), 'set()')
self.assertEqual(pprint.pformat(set(range(3))), '{0, 1, 2}')
self.assertEqual(pprint.pformat(set(range(7)), width=20), '''\
{0,
1,
2,
3,
4,
5,
6}''')
self.assertEqual(pprint.pformat(set2(range(7)), width=20), '''\
set2({0,
1,
2,
3,
4,
5,
6})''')
self.assertEqual(pprint.pformat(set3(range(7)), width=20),
'set3({0, 1, 2, 3, 4, 5, 6})')
self.assertEqual(pprint.pformat(frozenset()), 'frozenset()')
self.assertEqual(pprint.pformat(frozenset(range(3))),
'frozenset({0, 1, 2})')
self.assertEqual(pprint.pformat(frozenset(range(7)), width=20), '''\
frozenset({0,
1,
2,
3,
4,
5,
6})''')
self.assertEqual(pprint.pformat(frozenset2(range(7)), width=20), '''\
frozenset2({0,
1,
2,
3,
4,
5,
6})''')
self.assertEqual(pprint.pformat(frozenset3(range(7)), width=20),
'frozenset3({0, 1, 2, 3, 4, 5, 6})')
@unittest.expectedFailure
#See http://bugs.python.org/issue13907
@test.support.cpython_only
def test_set_of_sets_reprs(self):
# This test creates a complex arrangement of frozensets and
# compares the pretty-printed repr against a string hard-coded in
# the test. The hard-coded repr depends on the sort order of
# frozensets.
#
# However, as the docs point out: "Since sets only define
# partial ordering (subset relationships), the output of the
# list.sort() method is undefined for lists of sets."
#
# In a nutshell, the test assumes frozenset({0}) will always
# sort before frozenset({1}), but:
#
# >>> frozenset({0}) < frozenset({1})
# False
# >>> frozenset({1}) < frozenset({0})
# False
#
# Consequently, this test is fragile and
# implementation-dependent. Small changes to Python's sort
# algorithm cause the test to fail when it should pass.
        # XXX Or changes to the dictionary implementation...
cube_repr_tgt = """\
{frozenset(): frozenset({frozenset({2}), frozenset({0}), frozenset({1})}),
frozenset({0}): frozenset({frozenset(),
frozenset({0, 2}),
frozenset({0, 1})}),
frozenset({1}): frozenset({frozenset(),
frozenset({1, 2}),
frozenset({0, 1})}),
frozenset({2}): frozenset({frozenset(),
frozenset({1, 2}),
frozenset({0, 2})}),
frozenset({1, 2}): frozenset({frozenset({2}),
frozenset({1}),
frozenset({0, 1, 2})}),
frozenset({0, 2}): frozenset({frozenset({2}),
frozenset({0}),
frozenset({0, 1, 2})}),
frozenset({0, 1}): frozenset({frozenset({0}),
frozenset({1}),
frozenset({0, 1, 2})}),
frozenset({0, 1, 2}): frozenset({frozenset({1, 2}),
frozenset({0, 2}),
frozenset({0, 1})})}"""
cube = test.test_set.cube(3)
self.assertEqual(pprint.pformat(cube), cube_repr_tgt)
cubo_repr_tgt = """\
{frozenset({frozenset({0, 2}), frozenset({0})}): frozenset({frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset({2}),
frozenset({0,
2})})}),
frozenset({frozenset({0, 1}), frozenset({1})}): frozenset({frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset({1}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({1})})}),
frozenset({frozenset({1, 2}), frozenset({1})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({1})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({1, 2}), frozenset({2})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({1}),
frozenset({1,
2})}),
frozenset({frozenset({2}),
frozenset({0,
2})}),
frozenset({frozenset(),
frozenset({2})})}),
frozenset({frozenset(), frozenset({0})}): frozenset({frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset(),
frozenset({1})}),
frozenset({frozenset(),
frozenset({2})})}),
frozenset({frozenset(), frozenset({1})}): frozenset({frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset({1}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({2})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({2}), frozenset()}): frozenset({frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset(),
frozenset({1})}),
frozenset({frozenset({2}),
frozenset({0,
2})})}),
frozenset({frozenset({0, 1, 2}), frozenset({0, 1})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({0}), frozenset({0, 1})}): frozenset({frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({2}), frozenset({0, 2})}): frozenset({frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset(),
frozenset({2})})}),
frozenset({frozenset({0, 1, 2}), frozenset({0, 2})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset({2}),
frozenset({0,
2})})}),
frozenset({frozenset({1, 2}), frozenset({0, 1, 2})}): frozenset({frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset({1}),
frozenset({1,
2})})})}"""
cubo = test.test_set.linegraph(cube)
self.assertEqual(pprint.pformat(cubo), cubo_repr_tgt)
def test_depth(self):
nested_tuple = (1, (2, (3, (4, (5, 6)))))
nested_dict = {1: {2: {3: {4: {5: {6: 6}}}}}}
nested_list = [1, [2, [3, [4, [5, [6, []]]]]]]
self.assertEqual(pprint.pformat(nested_tuple), repr(nested_tuple))
self.assertEqual(pprint.pformat(nested_dict), repr(nested_dict))
self.assertEqual(pprint.pformat(nested_list), repr(nested_list))
lv1_tuple = '(1, (...))'
lv1_dict = '{1: {...}}'
lv1_list = '[1, [...]]'
self.assertEqual(pprint.pformat(nested_tuple, depth=1), lv1_tuple)
self.assertEqual(pprint.pformat(nested_dict, depth=1), lv1_dict)
self.assertEqual(pprint.pformat(nested_list, depth=1), lv1_list)
def test_sort_unorderable_values(self):
# Issue 3976: sorted pprints fail for unorderable values.
n = 20
keys = [Unorderable() for i in range(n)]
random.shuffle(keys)
skeys = sorted(keys, key=id)
clean = lambda s: s.replace(' ', '').replace('\n','')
self.assertEqual(clean(pprint.pformat(set(keys))),
'{' + ','.join(map(repr, skeys)) + '}')
self.assertEqual(clean(pprint.pformat(frozenset(keys))),
'frozenset({' + ','.join(map(repr, skeys)) + '})')
self.assertEqual(clean(pprint.pformat(dict.fromkeys(keys))),
'{' + ','.join('%r:None' % k for k in skeys) + '}')
# Issue 10017: TypeError on user-defined types as dict keys.
self.assertEqual(pprint.pformat({Unorderable: 0, 1: 0}),
'{1: 0, ' + repr(Unorderable) +': 0}')
# Issue 14998: TypeError on tuples with NoneTypes as dict keys.
keys = [(1,), (None,)]
self.assertEqual(pprint.pformat(dict.fromkeys(keys, 0)),
'{%r: 0, %r: 0}' % tuple(sorted(keys, key=id)))
def test_str_wrap(self):
# pprint tries to wrap strings intelligently
fox = 'the quick brown fox jumped over a lazy dog'
self.assertEqual(pprint.pformat(fox, width=20), """\
'the quick brown '
'fox jumped over '
'a lazy dog'""")
self.assertEqual(pprint.pformat({'a': 1, 'b': fox, 'c': 2},
width=26), """\
{'a': 1,
'b': 'the quick brown '
'fox jumped over '
'a lazy dog',
'c': 2}""")
# With some special characters
# - \n always triggers a new line in the pprint
# - \t and \n are escaped
# - non-ASCII is allowed
# - an apostrophe doesn't disrupt the pprint
special = "Portons dix bons \"whiskys\"\nà l'avocat goujat\t qui fumait au zoo"
self.assertEqual(pprint.pformat(special, width=20), """\
'Portons dix bons '
'"whiskys"\\n'
"à l'avocat "
'goujat\\t qui '
'fumait au zoo'""")
# An unwrappable string is formatted as its repr
unwrappable = "x" * 100
self.assertEqual(pprint.pformat(unwrappable, width=80), repr(unwrappable))
self.assertEqual(pprint.pformat(''), "''")
# Check that the pprint is a usable repr
special *= 10
for width in range(3, 40):
formatted = pprint.pformat(special, width=width)
self.assertEqual(eval("(" + formatted + ")"), special)
def test_compact(self):
o = ([list(range(i * i)) for i in range(5)] +
[list(range(i)) for i in range(6)])
expected = """\
[[], [0], [0, 1, 2, 3],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15],
[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3],
[0, 1, 2, 3, 4]]"""
self.assertEqual(pprint.pformat(o, width=48, compact=True), expected)
class DottedPrettyPrinter(pprint.PrettyPrinter):
def format(self, object, context, maxlevels, level):
if isinstance(object, str):
if ' ' in object:
return repr(object), 1, 0
else:
return object, 0, 0
else:
return pprint.PrettyPrinter.format(
self, object, context, maxlevels, level)
def test_main():
test.support.run_unittest(QueryTestCase)
if __name__ == "__main__":
test_main()
|
lgpl-3.0
| 406,260,979,765,953,100 | -2,368,302,945,321,009,000 | 49.393688 | 99 | 0.351815 | false |
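A quick standalone illustration of the pformat parameters the tests above exercise (width, indent, depth, compact); running it on Python 3.4+ shows the wrapping behaviour QueryTestCase asserts on. The sample data is made up.

import pprint

nested = {'a': list(range(12)), 'b': (1, (2, (3, (4, 5))))}
print(pprint.pformat(nested, width=30))            # wraps the long list across lines
print(pprint.pformat(nested, indent=4, width=30))  # extra indentation inside containers
print(pprint.pformat(nested, depth=2))             # levels deeper than 2 are elided
print(pprint.pformat(list(range(20)), width=40, compact=True))  # packs items per line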
MinimalOS/external_skia
|
platform_tools/android/tests/utils.py
|
146
|
1155
|
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Common code for tests.
"""
import filecmp
import os
EXPECTATIONS_DIR = os.path.join(os.path.dirname(__file__), 'expectations')
def compare_to_expectation(actual_name, expectation_name, assert_true,
msg=None):
"""Check that a generated file matches its expectation in EXPECTATIONS_DIR.
Assert that the generated file and expectation file are identical.
Args:
actual_name: Full path to the test file.
expectation_name: Basename of the expectations file within which
to compare. The file is expected to be in
platform_tools/android/tests/expectations.
assert_true: function for asserting a statement is True
Args:
condition: statement to check for True.
msg: message to print if the files are not equal.
msg: Message to pass to assert_true.
"""
full_expectations_path = os.path.join(EXPECTATIONS_DIR, expectation_name)
assert_true(filecmp.cmp(actual_name, full_expectations_path), msg)
|
bsd-3-clause
| -3,887,166,187,032,921,000 | -7,755,460,419,658,228,000 | 31.083333 | 77 | 0.697835 | false |
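A hedged usage sketch for compare_to_expectation above, passing unittest's assertTrue in as the assertion hook; the importing module name, test class and file names are hypothetical.

import unittest

import utils  # the helper module above

class GeneratedFileTest(unittest.TestCase):  # hypothetical test case
    def test_output_matches_expectation(self):
        utils.compare_to_expectation(
            actual_name='/tmp/Android.mk',   # hypothetical generated file
            expectation_name='Android.mk',   # basename under tests/expectations
            assert_true=self.assertTrue,
            msg='generated Android.mk differs from its expectation')

if __name__ == '__main__':
    unittest.main()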