| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
from binding import *
from src.namespace import llvm
from src.Value import MDNode
from src.Instruction import Instruction, TerminatorInst
llvm.includes.add('llvm/Transforms/Utils/BasicBlockUtils.h')
SplitBlockAndInsertIfThen = llvm.Function('SplitBlockAndInsertIfThen',
ptr(TerminatorInst),
ptr(Instruction), # cmp
cast(bool, Bool), # unreachable
ptr(MDNode)) # branchweights
ReplaceInstWithInst = llvm.Function('ReplaceInstWithInst',
Void,
ptr(Instruction), # from
ptr(Instruction)) # to
| llvmpy/llvmpy | llvmpy/src/Transforms/Utils/BasicBlockUtils.py | Python | bsd-3-clause | 767 |
from __future__ import absolute_import
import mock
import os
from django.conf import settings
from sentry_sdk import Hub
TEST_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, "tests")
)
def pytest_configure(config):
# HACK: Only needed for testing!
os.environ.setdefault("_SENTRY_SKIP_CONFIGURATION", "1")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sentry.conf.server")
# override docs which are typically synchronized from an upstream server
# to ensure tests are consistent
os.environ.setdefault(
"INTEGRATION_DOC_FOLDER", os.path.join(TEST_ROOT, "fixtures", "integration-docs")
)
from sentry.utils import integrationdocs
integrationdocs.DOC_FOLDER = os.environ["INTEGRATION_DOC_FOLDER"]
if not settings.configured:
# only configure the db if it's not already done
test_db = os.environ.get("DB", "postgres")
if test_db == "postgres":
settings.DATABASES["default"].update(
{
"ENGINE": "sentry.db.postgres",
"USER": "postgres",
"NAME": "sentry",
"HOST": "127.0.0.1",
}
)
# postgres requires running full migration all the time
# since it has to install stored functions which come from
# an actual migration.
else:
raise RuntimeError("oops, wrong database: %r" % test_db)
settings.TEMPLATE_DEBUG = True
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ("tests",)
# Need a predictable key for tests that involve checking signatures
settings.SENTRY_PUBLIC = False
if not settings.SENTRY_CACHE:
settings.SENTRY_CACHE = "sentry.cache.django.DjangoCache"
settings.SENTRY_CACHE_OPTIONS = {}
# This speeds up the tests considerably; pbkdf2 is, by design, slow.
settings.PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
settings.AUTH_PASSWORD_VALIDATORS = []
# Replace real sudo middleware with our mock sudo middleware
# to assert that the user is always in sudo mode
middleware = list(settings.MIDDLEWARE_CLASSES)
sudo = middleware.index("sentry.middleware.sudo.SudoMiddleware")
middleware[sudo] = "sentry.testutils.middleware.SudoMiddleware"
settings.MIDDLEWARE_CLASSES = tuple(middleware)
settings.SENTRY_OPTIONS["cloudflare.secret-key"] = "cloudflare-secret-key"
# enable draft features
settings.SENTRY_OPTIONS["mail.enable-replies"] = True
settings.SENTRY_ALLOW_ORIGIN = "*"
settings.SENTRY_TSDB = "sentry.tsdb.inmemory.InMemoryTSDB"
settings.SENTRY_TSDB_OPTIONS = {}
if settings.SENTRY_NEWSLETTER == "sentry.newsletter.base.Newsletter":
settings.SENTRY_NEWSLETTER = "sentry.newsletter.dummy.DummyNewsletter"
settings.SENTRY_NEWSLETTER_OPTIONS = {}
settings.BROKER_BACKEND = "memory"
settings.BROKER_URL = None
settings.CELERY_ALWAYS_EAGER = False
settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
settings.DEBUG_VIEWS = True
settings.SENTRY_ENCRYPTION_SCHEMES = ()
settings.DISABLE_RAVEN = True
settings.CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
if os.environ.get("USE_SNUBA", False):
settings.SENTRY_SEARCH = "sentry.search.snuba.SnubaSearchBackend"
settings.SENTRY_TAGSTORE = "sentry.tagstore.snuba.SnubaCompatibilityTagStorage"
settings.SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
settings.SENTRY_EVENTSTREAM = "sentry.eventstream.snuba.SnubaEventStream"
if not hasattr(settings, "SENTRY_OPTIONS"):
settings.SENTRY_OPTIONS = {}
settings.SENTRY_OPTIONS.update(
{
"redis.clusters": {"default": {"hosts": {0: {"db": 9}}}},
"mail.backend": "django.core.mail.backends.locmem.EmailBackend",
"system.url-prefix": "http://testserver",
"slack.client-id": "slack-client-id",
"slack.client-secret": "slack-client-secret",
"slack.verification-token": "slack-verification-token",
"github-app.name": "sentry-test-app",
"github-app.client-id": "github-client-id",
"github-app.client-secret": "github-client-secret",
"vsts.client-id": "vsts-client-id",
"vsts.client-secret": "vsts-client-secret",
}
)
# django mail uses socket.getfqdn which doesn't play nice if our
# networking isn't stable
patcher = mock.patch("socket.getfqdn", return_value="localhost")
patcher.start()
if not settings.SOUTH_TESTS_MIGRATE:
settings.INSTALLED_APPS = tuple(i for i in settings.INSTALLED_APPS if i != "south")
from sentry.runner.initializer import (
bootstrap_options,
configure_structlog,
initialize_receivers,
fix_south,
bind_cache_to_option_store,
setup_services,
)
bootstrap_options(settings)
configure_structlog()
fix_south(settings)
import django
if hasattr(django, "setup"):
django.setup()
bind_cache_to_option_store()
initialize_receivers()
setup_services()
register_extensions()
from sentry.utils.redis import clusters
with clusters.get("default").all() as client:
client.flushdb()
# force celery registration
from sentry.celery import app # NOQA
# disable DISALLOWED_IPS
from sentry import http
http.DISALLOWED_IPS = set()
def register_extensions():
from sentry.plugins import plugins
from sentry.plugins.utils import TestIssuePlugin2
plugins.register(TestIssuePlugin2)
from sentry import integrations
from sentry.integrations.bitbucket import BitbucketIntegrationProvider
from sentry.integrations.example import (
ExampleIntegrationProvider,
AliasedIntegrationProvider,
ExampleRepositoryProvider,
)
from sentry.integrations.github import GitHubIntegrationProvider
from sentry.integrations.github_enterprise import GitHubEnterpriseIntegrationProvider
from sentry.integrations.gitlab import GitlabIntegrationProvider
from sentry.integrations.jira import JiraIntegrationProvider
from sentry.integrations.jira_server import JiraServerIntegrationProvider
from sentry.integrations.slack import SlackIntegrationProvider
from sentry.integrations.vsts import VstsIntegrationProvider
from sentry.integrations.vsts_extension import VstsExtensionIntegrationProvider
integrations.register(BitbucketIntegrationProvider)
integrations.register(ExampleIntegrationProvider)
integrations.register(AliasedIntegrationProvider)
integrations.register(GitHubIntegrationProvider)
integrations.register(GitHubEnterpriseIntegrationProvider)
integrations.register(GitlabIntegrationProvider)
integrations.register(JiraIntegrationProvider)
integrations.register(JiraServerIntegrationProvider)
integrations.register(SlackIntegrationProvider)
integrations.register(VstsIntegrationProvider)
integrations.register(VstsExtensionIntegrationProvider)
from sentry.plugins import bindings
from sentry.plugins.providers.dummy import DummyRepositoryProvider
bindings.add("repository.provider", DummyRepositoryProvider, id="dummy")
bindings.add(
"integration-repository.provider", ExampleRepositoryProvider, id="integrations:example"
)
def pytest_runtest_teardown(item):
if not os.environ.get("USE_SNUBA", False):
from sentry import tsdb
# TODO(dcramer): this only works if this is the correct tsdb backend
tsdb.flush()
# XXX(dcramer): only works with DummyNewsletter
from sentry import newsletter
if hasattr(newsletter.backend, "clear"):
newsletter.backend.clear()
from sentry.utils.redis import clusters
with clusters.get("default").all() as client:
client.flushdb()
from celery.task.control import discard_all
discard_all()
from sentry.models import OrganizationOption, ProjectOption, UserOption
for model in (OrganizationOption, ProjectOption, UserOption):
model.objects.clear_local_cache()
Hub.main.bind_client(None)
| mvaled/sentry | src/sentry/utils/pytest/sentry.py | Python | bsd-3-clause | 8,427 |
import pytest
from django.db import connection, IntegrityError
from .models import MyTree
def flush_constraints():
# The default db setup is to have constraints DEFERRED,
# so IntegrityErrors only happen when the transaction commits.
# Django's TestCase does eventually flush the constraints, but to
# actually test them *within* a test case we have to flush them manually.
connection.cursor().execute("SET CONSTRAINTS ALL IMMEDIATE")
def test_node_creation_simple(db):
MyTree.objects.create(label='root1')
MyTree.objects.create(label='root2')
def test_node_creation_with_no_label(db):
# You need a label
with pytest.raises(ValueError):
MyTree.objects.create(label='')
with pytest.raises(ValueError):
MyTree.objects.create(label=None)
with pytest.raises(ValueError):
MyTree.objects.create()
def test_root_node_already_exists(db):
MyTree.objects.create(label='root1')
with pytest.raises(IntegrityError):
MyTree.objects.create(label='root1')
def test_same_label_but_different_parent(db):
root1 = MyTree.objects.create(label='root1')
MyTree.objects.create(label='root1', parent=root1)
def test_same_label_as_sibling(db):
root1 = MyTree.objects.create(label='root1')
MyTree.objects.create(label='child', parent=root1)
with pytest.raises(IntegrityError):
MyTree.objects.create(label='child', parent=root1)
def test_parent_is_self_errors(db):
root1 = MyTree.objects.create(label='root1')
root1.parent = root1
with pytest.raises(IntegrityError):
root1.save()
flush_constraints()
def test_parent_is_remote_ancestor_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
with pytest.raises(IntegrityError):
# To test this integrity error, have to update table without calling save()
# (because save() changes `ltree` to match `parent_id`)
MyTree.objects.filter(pk=desc3.pk).update(parent=root1)
flush_constraints()
def test_parent_is_descendant_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
child2.parent = desc3
with pytest.raises(IntegrityError):
child2.save()
flush_constraints()
| craigds/django-mpathy | tests/test_db_consistency.py | Python | bsd-3-clause | 2,474 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from markitup import settings
from markitup.markup import filter_func
from markitup.sanitize import sanitize_html
def apply_filter(request):
cleaned_data = sanitize_html(request.POST.get('data', ''), strip=True)
markup = filter_func(cleaned_data)
return render_to_response( 'markitup/preview.html',
{'preview': markup},
context_instance=RequestContext(request))
| thoslin/django-markitup | markitup/views.py | Python | bsd-3-clause | 527 |
# -*- coding: utf-8 -*-
"""
Doctest runner for 'birdhousebuilder.recipe.adagucserver'.
"""
__docformat__ = 'restructuredtext'
import os
import sys
import unittest
import zc.buildout.tests
import zc.buildout.testing
from zope.testing import doctest, renormalizing
optionflags = (doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_ONLY_FIRST_FAILURE)
def setUp(test):
zc.buildout.testing.buildoutSetUp(test)
# Install the recipe in develop mode
zc.buildout.testing.install_develop('birdhousebuilder.recipe.adagucserver', test)
test.globs['os'] = os
test.globs['sys'] = sys
test.globs['test_dir'] = os.path.dirname(__file__)
def test_suite():
suite = unittest.TestSuite((
doctest.DocFileSuite(
'../../../../README.rst',
setUp=setUp,
tearDown=zc.buildout.testing.buildoutTearDown,
optionflags=optionflags,
checker=renormalizing.RENormalizing([
# If want to clean up the doctest output you
# can register additional regexp normalizers
# here. The format is a two-tuple with the RE
# as the first item and the replacement as the
# second item, e.g.
# (re.compile('my-[rR]eg[eE]ps'), 'my-regexps')
zc.buildout.testing.normalize_path,
]),
),
))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| bird-house/birdhousebuilder.recipe.adagucserver | birdhousebuilder/recipe/adagucserver/tests/test_docs.py | Python | bsd-3-clause | 1,620 |
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
API Issues to work out:
- MatrixTransform and STTransform both have 'scale' and 'translate'
attributes, but they are used in very different ways. It would be nice
to keep this consistent, but how?
- Need a transform.map_rect function that returns the bounding rectangle of
a rect after transformation. Non-linear transforms might need to work
harder at this, but we can provide a default implementation that
works by mapping a selection of points across a grid within the original
rect.
"""
from __future__ import division
from ..shaders import Function
from ...util.event import EventEmitter
class BaseTransform(object):
"""
BaseTransform is a base class that defines a pair of complementary
coordinate mapping functions in both python and GLSL.
All BaseTransform subclasses define map() and imap() methods that map
an object through the forward or inverse transformation, respectively.
The two class variables glsl_map and glsl_imap are instances of
shaders.Function that define the forward- and inverse-mapping GLSL
function code.
Optionally, an inverse() method returns a new transform performing the
inverse mapping.
Note that although all classes should define both map() and imap(), it
is not necessarily the case that imap(map(x)) == x; there may be instances
where the inverse mapping is ambiguous or otherwise meaningless.
"""
glsl_map = None # Must be GLSL code
glsl_imap = None
# Flags used to describe the transformation. Subclasses should define each
# as True or False.
# (usually used for making optimization decisions)
# If True, then for any 3 colinear points, the
# transformed points will also be colinear.
Linear = None
# The transformation's effect on one axis is independent
# of the input position along any other axis.
Orthogonal = None
# If True, then the distance between two points is the
# same as the distance between the transformed points.
NonScaling = None
# Scale factors are applied equally to all axes.
Isometric = None
def __init__(self):
self._inverse = None
self._dynamic = False
self.changed = EventEmitter(source=self, type='transform_changed')
if self.glsl_map is not None:
self._shader_map = Function(self.glsl_map)
if self.glsl_imap is not None:
self._shader_imap = Function(self.glsl_imap)
def map(self, obj):
"""
Return *obj* mapped through the forward transformation.
Parameters
----------
obj : tuple (x,y) or (x,y,z)
array with shape (..., 2) or (..., 3)
"""
raise NotImplementedError()
def imap(self, obj):
"""
Return *obj* mapped through the inverse transformation.
Parameters
----------
obj : tuple (x,y) or (x,y,z)
array with shape (..., 2) or (..., 3)
"""
raise NotImplementedError()
@property
def inverse(self):
""" The inverse of this transform.
"""
if self._inverse is None:
self._inverse = InverseTransform(self)
return self._inverse
@property
def dynamic(self):
"""Boolean flag that indicates whether this transform is expected to
change frequently.
Transforms that are flagged as dynamic will not be collapsed in
``ChainTransform.simplified``. This allows changes to the transform
to propagate through the chain without requiring the chain to be
re-simplified.
"""
return self._dynamic
@dynamic.setter
def dynamic(self, d):
self._dynamic = d
def shader_map(self):
"""
Return a shader Function that accepts only a single vec4 argument
and defines new attributes / uniforms supplying the Function with
any static input.
"""
return self._shader_map
def shader_imap(self):
"""
see shader_map.
"""
return self._shader_imap
def _shader_object(self):
""" This method allows transforms to be assigned directly to shader
template variables.
Example::
code = 'void main() { gl_Position = $transform($position); }'
func = shaders.Function(code)
tr = STTransform()
func['transform'] = tr # use tr's forward mapping for $function
"""
return self.shader_map()
def update(self, *args):
"""
Called to inform any listeners that this transform has changed.
"""
self.changed(*args)
def __mul__(self, tr):
"""
Transform multiplication returns a new transform that is equivalent to
the two operands performed in series.
By default, multiplying two Transforms `A * B` will return
ChainTransform([A, B]). Subclasses may redefine this operation to
return more optimized results.
To ensure that both operands have a chance to simplify the operation,
all subclasses should follow the same procedure. For `A * B`:
1. A.__mul__(B) attempts to generate an optimized transform product.
2. If that fails, it must:
* return super(A).__mul__(B) OR
* return NotImplemented if the superclass would return an
invalid result.
3. When BaseTransform.__mul__(A, B) is called, it returns
NotImplemented, which causes B.__rmul__(A) to be invoked.
4. B.__rmul__(A) attempts to generate an optimized transform product.
5. If that fails, it must:
* return super(B).__rmul__(A) OR
* return ChainTransform([B, A]) if the superclass would return
an invalid result.
6. When BaseTransform.__rmul__(B, A) is called, ChainTransform([A, B])
is returned.
"""
# switch to __rmul__ attempts.
# Don't use the "return NotImplemented" trick, because that won't work if
# self and tr are of the same type.
return tr.__rmul__(self)
def __rmul__(self, tr):
return ChainTransform([tr, self])
def __repr__(self):
return "<%s at 0x%x>" % (self.__class__.__name__, id(self))
def __del__(self):
# we can remove ourselves from *all* events in this situation.
self.changed.disconnect()
class InverseTransform(BaseTransform):
def __init__(self, transform):
BaseTransform.__init__(self)
self._inverse = transform
self.map = transform.imap
self.imap = transform.map
@property
def Linear(self):
return self._inverse.Linear
@property
def Orthogonal(self):
return self._inverse.Orthogonal
@property
def NonScaling(self):
return self._inverse.NonScaling
@property
def Isometric(self):
return self._inverse.Isometric
@property
def shader_map(self):
return self._inverse.shader_imap
@property
def shader_imap(self):
return self._inverse.shader_map
def __repr__(self):
return ("<Inverse of %r>" % repr(self._inverse))
# import here to avoid import cycle; needed for BaseTransform.__mul__.
from .chain import ChainTransform # noqa
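# --------------------------------------------------------------------------
# Illustrative sketch (not part of vispy itself): a minimal subclass showing
# the map()/imap() contract described in the BaseTransform docstring above.
# The class name and GLSL function names below are made up for illustration.
class _IdentityTransformExample(BaseTransform):
    glsl_map = "vec4 identity_map(vec4 pos) { return pos; }"
    glsl_imap = "vec4 identity_imap(vec4 pos) { return pos; }"

    Linear = True
    Orthogonal = True
    NonScaling = True
    Isometric = True

    def map(self, obj):
        # Forward mapping is the identity, so imap(map(x)) == x trivially.
        return obj

    def imap(self, obj):
        return obj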
| Eric89GXL/vispy | vispy/visuals/transforms/base_transform.py | Python | bsd-3-clause | 7,578 |
from x86asm import codePackageFromFile
from x86cpToMemory import CpToMemory
from pythonConstants import PythonConstants
import cStringIO
import excmem
def pyasm(scope,s):
cp = codePackageFromFile(cStringIO.StringIO(s),PythonConstants)
mem = CpToMemory(cp)
mem.MakeMemory()
mem.BindPythonFunctions(scope)
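# Usage sketch (illustrative only): the assembly-source syntax accepted by
# codePackageFromFile is defined by the pyasm grammar, so only the calling
# convention of this helper is shown here.
#
#     pyasm(globals(), asm_source_string)
#     # Procedures defined in asm_source_string are bound into the supplied
#     # scope and become callable as ordinary Python functions.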
| grant-olson/pyasm | __init__.py | Python | bsd-3-clause | 327 |
"""
Custom Authenticator to use MediaWiki OAuth with JupyterHub
Requires `mwoauth` package.
"""
import json
import os
from asyncio import wrap_future
from concurrent.futures import ThreadPoolExecutor
from jupyterhub.handlers import BaseHandler
from jupyterhub.utils import url_path_join
from mwoauth import ConsumerToken
from mwoauth import Handshaker
from mwoauth.tokens import RequestToken
from traitlets import Any
from traitlets import Integer
from traitlets import Unicode
from oauthenticator import OAuthCallbackHandler
from oauthenticator import OAuthenticator
# Name of cookie used to pass auth token between the oauth
# login and authentication phase
AUTH_REQUEST_COOKIE_NAME = 'mw_oauth_request_token_v2'
# Helpers to jsonify/de-jsonify request_token.
# It is a named tuple of bytestrings, which json.dumps balks at.
def jsonify(request_token):
return json.dumps(
[
request_token.key,
request_token.secret,
]
)
def dejsonify(js):
key, secret = json.loads(js)
return RequestToken(key, secret)
class MWLoginHandler(BaseHandler):
async def get(self):
consumer_token = ConsumerToken(
self.authenticator.client_id,
self.authenticator.client_secret,
)
handshaker = Handshaker(self.authenticator.mw_index_url, consumer_token)
redirect, request_token = await wrap_future(
self.authenticator.executor.submit(handshaker.initiate)
)
self.set_secure_cookie(
AUTH_REQUEST_COOKIE_NAME,
jsonify(request_token),
expires_days=1,
path=url_path_join(self.base_url, 'hub', 'oauth_callback'),
httponly=True,
)
self.log.info('oauth redirect: %r', redirect)
self.redirect(redirect)
class MWCallbackHandler(OAuthCallbackHandler):
"""
Override OAuthCallbackHandler to take out state parameter handling.
mwoauth doesn't seem to support it for now!
"""
def check_arguments(self):
pass
def get_state_url(self):
return None
class MWOAuthenticator(OAuthenticator):
login_service = 'MediaWiki'
login_handler = MWLoginHandler
callback_handler = MWCallbackHandler
mw_index_url = Unicode(
os.environ.get('MW_INDEX_URL', 'https://meta.wikimedia.org/w/index.php'),
config=True,
help='Full path to index.php of the MW instance to use to log in',
)
executor_threads = Integer(
12,
help="""Number of executor threads.
MediaWiki OAuth requests happen in this thread,
so it is mostly waiting for network replies.
""",
config=True,
)
executor = Any()
def normalize_username(self, username):
"""
Override normalize_username to avoid lowercasing usernames
"""
return username
def _executor_default(self):
return ThreadPoolExecutor(self.executor_threads)
async def authenticate(self, handler, data=None):
consumer_token = ConsumerToken(
self.client_id,
self.client_secret,
)
handshaker = Handshaker(self.mw_index_url, consumer_token)
request_token = dejsonify(handler.get_secure_cookie(AUTH_REQUEST_COOKIE_NAME))
handler.clear_cookie(AUTH_REQUEST_COOKIE_NAME)
access_token = await wrap_future(
self.executor.submit(
handshaker.complete, request_token, handler.request.query
)
)
identity = await wrap_future(
self.executor.submit(handshaker.identify, access_token)
)
if identity and 'username' in identity:
# this shouldn't be necessary anymore,
# but keep for backward-compatibility
return {
'name': identity['username'].replace(' ', '_'),
'auth_state': {
'ACCESS_TOKEN_KEY': access_token.key,
'ACCESS_TOKEN_SECRET': access_token.secret,
'MEDIAWIKI_USER_IDENTITY': identity,
},
}
else:
self.log.error("No username found in %s", identity)
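# Deployment sketch (illustrative, with made-up credential values): in a
# jupyterhub_config.py the authenticator is typically selected and configured
# through the ``c`` config object, which is not defined in this module.
#
#     c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'
#     c.MWOAuthenticator.client_id = 'mediawiki-consumer-key'
#     c.MWOAuthenticator.client_secret = 'mediawiki-consumer-secret'
#     c.MWOAuthenticator.mw_index_url = 'https://meta.wikimedia.org/w/index.php'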
| jupyterhub/oauthenticator | oauthenticator/mediawiki.py | Python | bsd-3-clause | 4,178 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This program wraps an arbitrary command since gn currently can only execute
scripts."""
import os
import subprocess
import sys
from shutil import copy2
args = sys.argv[1:]
args[0] = os.path.abspath(args[0])
#if sys.platform == 'darwin':
# copy2(os.path.join(os.path.dirname(args[0]), 'libffmpeg.dylib'), os.path.dirname(os.path.dirname(args[0])))
sys.exit(subprocess.call(args))
| nwjs/chromium.src | tools/v8_context_snapshot/run.py | Python | bsd-3-clause | 552 |
#
# FBrowserBase.py -- Base class for file browser plugin for fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os, glob
import stat, time
from ginga.misc import Bunch
from ginga import GingaPlugin
from ginga import AstroImage
from ginga.util import paths
from ginga.util.six.moves import map, zip
class FBrowserBase(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(FBrowserBase, self).__init__(fv, fitsimage)
self.keywords = ['OBJECT', 'UT']
self.columns = [('Name', 'name'),
('Size', 'st_size'),
('Mode', 'st_mode'),
('Last Changed', 'st_mtime')
]
self.jumpinfo = []
homedir = paths.home
self.curpath = os.path.join(homedir, '*')
self.do_scanfits = False
self.moving_cursor = False
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_local_plugin(chname, str(self))
return True
def file_icon(self, bnch):
if bnch.type == 'dir':
pb = self.folderpb
elif bnch.type == 'fits':
pb = self.fitspb
else:
pb = self.filepb
return pb
def open_file(self, path):
self.logger.debug("path: %s" % (path))
if path == '..':
curdir, curglob = os.path.split(self.curpath)
path = os.path.join(curdir, path, curglob)
if os.path.isdir(path):
path = os.path.join(path, '*')
self.browse(path)
elif os.path.exists(path):
#self.fv.load_file(path)
uri = "file://%s" % (path)
self.fitsimage.make_callback('drag-drop', [uri])
else:
self.browse(path)
def get_info(self, path):
dirname, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
ftype = 'file'
if os.path.isdir(path):
ftype = 'dir'
elif os.path.islink(path):
ftype = 'link'
elif ext.lower() == '.fits':
ftype = 'fits'
try:
filestat = os.stat(path)
bnch = Bunch.Bunch(path=path, name=filename, type=ftype,
st_mode=filestat.st_mode, st_size=filestat.st_size,
st_mtime=filestat.st_mtime)
except OSError as e:
# TODO: identify some kind of error with this path
bnch = Bunch.Bunch(path=path, name=filename, type=ftype,
st_mode=0, st_size=0,
st_mtime=0)
return bnch
def browse(self, path):
self.logger.debug("path: %s" % (path))
if os.path.isdir(path):
dirname = path
globname = None
else:
dirname, globname = os.path.split(path)
dirname = os.path.abspath(dirname)
# check validity of leading path name
if not os.path.isdir(dirname):
self.fv.show_error("Not a valid path: %s" % (dirname))
return
if not globname:
globname = '*'
path = os.path.join(dirname, globname)
# Make a directory listing
self.logger.debug("globbing path: %s" % (path))
filelist = list(glob.glob(path))
filelist.sort(key=str.lower)
filelist.insert(0, os.path.join(dirname, '..'))
self.jumpinfo = list(map(self.get_info, filelist))
self.curpath = path
if self.do_scanfits:
self.scan_fits()
self.makelisting(path)
def scan_fits(self):
for bnch in self.jumpinfo:
if not bnch.type == 'fits':
continue
if 'kwds' not in bnch:
try:
in_f = AstroImage.pyfits.open(bnch.path, 'readonly')
try:
kwds = {}
for kwd in self.keywords:
kwds[kwd] = in_f[0].header.get(kwd, 'N/A')
bnch.kwds = kwds
finally:
in_f.close()
except Exception as e:
continue
def refresh(self):
self.browse(self.curpath)
def scan_headers(self):
self.browse(self.curpath)
def make_thumbs(self):
path = self.curpath
self.logger.info("Generating thumbnails for '%s'..." % (
path))
filelist = glob.glob(path)
filelist.sort(key=str.lower)
# find out our channel
chname = self.fv.get_channelName(self.fitsimage)
# Invoke the method in this channel's Thumbs plugin
# TODO: don't expose gpmon!
rsobj = self.fv.gpmon.getPlugin('Thumbs')
self.fv.nongui_do(rsobj.make_thumbs, chname, filelist)
def start(self):
self.win = None
self.browse(self.curpath)
def pause(self):
pass
def resume(self):
pass
def stop(self):
pass
def redo(self):
return True
#END
| bsipocz/ginga | ginga/misc/plugins/FBrowserBase.py | Python | bsd-3-clause | 5,421 |
from .cuda_products import gmt_func as gp_device
from .cuda_products import imt_func as ip_device
import numpy as np
import numba.cuda
import numba
import math
import random
from . import *
def sequential_rotor_estimation_chunks(reference_model_array, query_model_array, n_samples, n_objects_per_sample, mutation_probability=None):
# Stack up a list of numbers
total_matches = n_samples*n_objects_per_sample
sample_indices = random.sample(range(total_matches), total_matches)
n_mvs = reference_model_array.shape[0]
sample_indices = [i % n_mvs for i in sample_indices]
if mutation_probability is not None:
reference_model_array_new = []
mutation_flag = np.random.binomial(1, mutation_probability, total_matches)
for mut, i in zip(mutation_flag, sample_indices):
if mut:
ref_ind = random.sample(range(len(reference_model_array)), 1)[0]
else:
ref_ind = i
reference_model_array_new.append(reference_model_array[ref_ind, :])
reference_model_array_new = np.array(reference_model_array_new)
else:
reference_model_array_new = np.array([reference_model_array[i, :] for i in sample_indices], dtype=np.float64)
query_model_array_new = np.array([query_model_array[i, :] for i in sample_indices], dtype=np.float64)
output = np.zeros((n_samples, 32), dtype=np.float64)
cost_array = np.zeros(n_samples, dtype=np.float64)
sequential_rotor_estimation_chunks_jit(reference_model_array_new, query_model_array_new, output, cost_array)
return output, cost_array
def sequential_rotor_estimation_chunks_mvs(reference_model_list, query_model_list, n_samples, n_objects_per_sample, mutation_probability=None):
query_model_array = np.array([l.value for l in query_model_list])
reference_model_array = np.array([l.value for l in reference_model_list])
output, cost_array = sequential_rotor_estimation_chunks(reference_model_array, query_model_array, n_samples, n_objects_per_sample, mutation_probability=mutation_probability)
output_mvs = [query_model_list[0]._newMV(output[i, :]) for i in range(output.shape[0])]
return output_mvs, cost_array
@numba.cuda.jit(device=True)
def set_as_unit_rotor_device(array):
for j in range(1, 32):
array[j] = 0.0
array[0] = 1.0
@numba.cuda.jit(device=True)
def sequential_rotor_estimation_device(reference_model, query_model, rotor_output):
n_iterations = 20
cost_tolerance = 10 * (10 ** -16)
# Allocate memory
r_set = numba.cuda.local.array(32, dtype=numba.float64)
r_running = numba.cuda.local.array(32, dtype=numba.float64)
r_root = numba.cuda.local.array(32, dtype=numba.float64)
r_temp = numba.cuda.local.array(32, dtype=numba.float64)
C1 = numba.cuda.local.array(32, dtype=numba.float64)
# Set up the running rotor estimate
set_as_unit_rotor_device(r_running)
# Start iterating for convergence
for iteration_number in range(n_iterations):
# Set up the convergence check
set_as_unit_rotor_device(r_set)
# Iterate over the array of objects
for mv_ind in range(reference_model.shape[0]):
apply_rotor_device(query_model[mv_ind, :], r_running, C1)
normalise_mv_device(C1)
C2 = reference_model[mv_ind, :]
# Check if they are the same other than a sign flip
sum_abs = 0.0
for b_ind in range(32):
sum_abs += abs(C1[b_ind] + C2[b_ind])
if sum_abs < 0.0001:
set_as_unit_rotor_device(r_root)
else:
rotor_between_objects_device(C1, C2, r_temp)
square_root_of_rotor_device(r_temp, r_root)
# Update the set rotor and the running rotor
gp_device(r_root, r_set, r_temp)
normalise_mv_copy_device(r_temp, r_set)
gp_device(r_root, r_running, r_temp)
normalise_mv_copy_device(r_temp, r_running)
# Check if we have converged
if rotor_cost_device(r_set) < cost_tolerance:
normalise_mv_copy_device(r_running, rotor_output)
# Now calculate the cost of this transform
total_cost = 0.0
for object_ind in range(query_model.shape[0]):
apply_rotor_device(query_model[object_ind, :], rotor_output, r_temp)
total_cost += cost_between_objects_device(r_temp, reference_model[object_ind, :])
return total_cost
# Return whatever we have
normalise_mv_copy_device(r_running, rotor_output)
total_cost = 0.0
for object_ind in range(query_model.shape[0]):
apply_rotor_device(query_model[object_ind, :], rotor_output, r_temp)
total_cost += cost_between_objects_device(r_temp, reference_model[object_ind, :])
return total_cost
@numba.cuda.jit
def sequential_rotor_estimation_kernel(reference_model, query_model, output, cost_array):
# Break the model into n chunks and estimate the rotor based on each of those
n_chunks = output.shape[0]
n_objects_per_chunk = int(reference_model.shape[0]/n_chunks)
i = numba.cuda.grid(1)
if i < n_chunks:
ref = reference_model[i*n_objects_per_chunk:(i+1)*n_objects_per_chunk]
qer = query_model[i*n_objects_per_chunk:(i+1)*n_objects_per_chunk]
total_cost = sequential_rotor_estimation_device(ref, qer, output[i, :])
cost_array[i] = total_cost
def sequential_rotor_estimation_cuda(reference_model_array, query_model_array, n_samples=None, n_objects_per_sample=None, mutation_probability=None):
if n_samples is None:
n_samples = int(len(query_model_array)/2)
if n_objects_per_sample is None:
n_objects_per_sample = max(int(len(query_model_array)/10), 5)
# Stack up a list of numbers
total_matches = n_samples*n_objects_per_sample
sample_indices = random.sample(range(total_matches), total_matches)
n_mvs = reference_model_array.shape[0]
sample_indices = [i % n_mvs for i in sample_indices]
if mutation_probability is not None:
reference_model_array_new = []
mutation_flag = np.random.binomial(1, mutation_probability, total_matches)
for mut, i in zip(mutation_flag, sample_indices):
if mut:
ref_ind = random.sample(range(len(reference_model_array)), 1)[0]
else:
ref_ind = i
reference_model_array_new.append(reference_model_array[ref_ind, :])
reference_model_array_new = np.array(reference_model_array_new)
else:
reference_model_array_new = np.array([reference_model_array[i, :] for i in sample_indices], dtype=np.float64)
query_model_array_new = np.array([query_model_array[i, :] for i in sample_indices], dtype=np.float64)
output = np.zeros((n_samples, 32), dtype=np.float64)
cost_array = np.zeros(n_samples, dtype=np.float64)
blockdim = 64
griddim = int(math.ceil(reference_model_array_new.shape[0] / blockdim))
sequential_rotor_estimation_kernel[griddim, blockdim](reference_model_array_new, query_model_array_new, output, cost_array)
return output, cost_array
def sequential_rotor_estimation_cuda_mvs(reference_model_list, query_model_list, n_samples, n_objects_per_sample, mutation_probability=None):
query_model_array = np.array([l.value for l in query_model_list])
reference_model_array = np.array([l.value for l in reference_model_list])
output, cost_array = sequential_rotor_estimation_cuda(reference_model_array, query_model_array, n_samples, n_objects_per_sample, mutation_probability=mutation_probability)
output_mvs = [query_model_list[0]._newMV(output[i, :]) for i in range(output.shape[0])]
return output_mvs, cost_array
@numba.cuda.jit(device=True)
def apply_rotor_device(mv, rotor, output):
rotor_adjoint = numba.cuda.local.array(32, dtype=numba.float64)
temp = numba.cuda.local.array(32, dtype=numba.float64)
adjoint_device(rotor, rotor_adjoint)
gp_device(mv, rotor_adjoint, temp)
gp_device(rotor, temp, output)
@numba.cuda.jit
def apply_rotor_kernel(mv, rotor, output):
# This does elementwise gp with the input arrays into the output array
i = numba.cuda.grid(1)
if i < mv.shape[0]:
apply_rotor_device(mv[i, :], rotor[i, :], output[i, :])
@numba.cuda.jit(device=True)
def square_root_of_rotor_device(rotor, rotor_root):
k_value = numba.cuda.local.array(32, dtype=numba.float64)
sigma_val = numba.cuda.local.array(32, dtype=numba.float64)
C_val = numba.cuda.local.array(32, dtype=numba.float64)
for i in range(32):
C_val[i] = rotor[i]
C_val[0] += 1.0
gp_mult_with_adjoint(C_val, sigma_val)
positive_root_device(sigma_val, k_value)
annhilate_k_device(k_value, C_val, rotor_root)
@numba.cuda.jit
def square_root_of_rotor_kernel(value, output):
i = numba.cuda.grid(1)
if i < value.shape[0]:
square_root_of_rotor_device(value[i, :], output[i, :])
@numba.cuda.jit(device=True)
def adjoint_device(value, output):
for j in range(0, 6):
output[j] = value[j]
output[6] = -value[6]
output[7] = -value[7]
output[8] = -value[8]
output[9] = -value[9]
output[10] = -value[10]
output[11] = -value[11]
output[12] = -value[12]
output[13] = -value[13]
output[14] = -value[14]
output[15] = -value[15]
output[16] = -value[16]
output[17] = -value[17]
output[18] = -value[18]
output[19] = -value[19]
output[20] = -value[20]
output[21] = -value[21]
output[22] = -value[22]
output[23] = -value[23]
output[24] = -value[24]
output[25] = -value[25]
for j in range(26, 32):
output[j] = value[j]
@numba.cuda.jit
def gp_kernel(value, other_value, output):
# This does elementwise gp with the input arrays into the output array
i = numba.cuda.grid(1)
if i < value.shape[0]:
gp_device(value[i, :], other_value[i, :], output[i, :])
@numba.cuda.jit
def adjoint_kernel(value, output):
i = numba.cuda.grid(1)
if i < value.shape[0]:
adjoint_device(value[i, :], output[i, :])
@numba.cuda.jit
def ip_kernel(value, other_value, output):
i = numba.cuda.grid(1)
if i < value.shape[0]:
ip_device(value[i, :], other_value[i, :], output[i, :])
@numba.cuda.jit(device=True)
def project_val_cuda(val, output, grade):
for i in range(32):
output[i] = 0.0
if grade == 0:
output[0] = val[0]
elif grade == 1:
for j in range(1, 6):
output[j] = val[j]
elif grade == 2:
for j in range(6, 16):
output[j] = val[j]
elif grade == 3:
for j in range(16, 26):
output[j] = val[j]
elif grade == 4:
for j in range(26, 31):
output[j] = val[j]
elif grade == 5:
output[31] = val[31]
@numba.cuda.jit(device=True)
def calc_norm_device(mv_val):
adj_value = numba.cuda.local.array(32, dtype=numba.float64)
output_value = numba.cuda.local.array(32, dtype=numba.float64)
adjoint_device(mv_val, adj_value)
gp_device(adj_value, mv_val, output_value)
return math.sqrt(abs(output_value[0]))
@numba.cuda.jit(device=True)
def normalise_mv_device(mv_val):
norm = calc_norm_device(mv_val)
for i in range(32):
mv_val[i] = mv_val[i]/norm
@numba.cuda.jit(device=True)
def normalise_mv_copy_device(mv_val, copy_array):
norm = calc_norm_device(mv_val)
for i in range(32):
copy_array[i] = mv_val[i]/norm
@numba.cuda.jit
def normalise_mvs_kernel(value_array):
i = numba.cuda.grid(1)
if i < value_array.shape[0]:
normalise_mv_device(value_array[i, :])
@numba.cuda.jit(device=True)
def annhilate_k_device(K_val, C_val, output):
k_4 = numba.cuda.local.array(32, dtype=numba.float64)
project_val_cuda(K_val, k_4, 4)
for i in range(32):
k_4[i] = -k_4[i]
k_4[0] += K_val[0]
gp_device(k_4, C_val, output)
normalise_mv_device(output)
@numba.cuda.jit(device=True)
def dorst_norm_val_device(sigma_val):
""" Square Root of Rotors - Implements the norm of a rotor"""
s_4 = numba.cuda.local.array(32, dtype=numba.float64)
s_4_sqrd = numba.cuda.local.array(32, dtype=numba.float64)
project_val_cuda(sigma_val, s_4, 4)
gp_device(s_4, s_4, s_4_sqrd)
sqrd_ans = sigma_val[0]*sigma_val[0] - s_4_sqrd[0]
return math.sqrt(abs(sqrd_ans))
@numba.cuda.jit
def dorst_norm_val_kernel(value, output):
i = numba.cuda.grid(1)
if i < value.shape[0]:
output[i] = dorst_norm_val_device(value[i, :])
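# The positive root below follows the closed-form square root of a rotor-like
# element used throughout this module: given sigma = C * ~C with scalar part
# sigma_0 and norm |sigma| = sqrt(sigma_0**2 - <sigma>_4**2) (computed by
# dorst_norm_val_device above), the root is
#     sqrt(sigma) = (sigma + |sigma|) / sqrt(2*sigma_0 + 2*|sigma|)
# which is what positive_root_device evaluates component-wise.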
@numba.cuda.jit(device=True)
def positive_root_device(sigma_val, result):
"""
Square Root of Rotors - Evaluates the positive root
"""
norm_s = dorst_norm_val_device(sigma_val)
denominator = (math.sqrt(2.0*sigma_val[0] + 2.0*norm_s))
for i in range(32):
result[i] = sigma_val[i]/denominator
result[0] = result[0] + norm_s/denominator
@numba.cuda.jit(device=True)
def rotor_between_objects_device(L1, L2, rotor):
L1sqrd_val = numba.cuda.local.array(32, dtype=numba.float64)
gp_device(L1, L1, L1sqrd_val)
if L1sqrd_val[0] > 0:
C_val = numba.cuda.local.array(32, dtype=numba.float64)
sigma_val = numba.cuda.local.array(32, dtype=numba.float64)
k_value = numba.cuda.local.array(32, dtype=numba.float64)
gp_device(L2, L1, C_val)
C_val[0] += 1.0
gp_mult_with_adjoint(C_val, sigma_val)
positive_root_device(sigma_val, k_value)
annhilate_k_device(k_value, C_val, rotor)
else:
L21 = numba.cuda.local.array(32, dtype=numba.float64)
L12 = numba.cuda.local.array(32, dtype=numba.float64)
gp_device(L2, L1, L21)
gp_device(L1, L2, L12)
sumval = 0.0
for i in range(32):
if i == 0:
sumval += abs(L12[i] + L21[i] - 2.0)
else:
sumval += abs(L12[i] + L21[i])
rotor[i] = -L21[i]
if sumval < 0.0000001:
rotor[0] = rotor[0] - 1.0
else:
rotor[0] = rotor[0] + 1.0
normalise_mv_device(rotor)
@numba.cuda.jit
def rotor_between_objects_kernel(value, other_value, output):
i = numba.cuda.grid(1)
if i < value.shape[0]:
rotor_between_objects_device(value[i, :], other_value[i, :], output[i, :])
@numba.cuda.jit(device=True)
def cost_between_objects_device(L1, L2):
R_val = numba.cuda.local.array(32, dtype=numba.float64)
rotor_between_objects_device(L1, L2, R_val)
return rotor_cost_device(R_val)
@numba.cuda.jit
def cost_between_objects_kernel(value, other_value, output):
# This does elementwise gp with the input arrays into the output array
i = numba.cuda.grid(1)
if i < value.shape[0]:
output[i] = cost_between_objects_device(value[i, :], other_value[i, :])
@numba.cuda.jit
def object_set_cost_kernel(line_set_a, line_set_b, cost_matrix):
a_ind, b_ind = numba.cuda.grid(2)
if a_ind < line_set_a.shape[0]:
if b_ind < line_set_b.shape[0]:
cost_matrix[a_ind, b_ind] = cost_between_objects_device(line_set_a[a_ind, :], line_set_b[b_ind, :])
def object_set_cost_cuda_value(line_set_a, line_set_b):
threadsperblock = (16, 16)
blockspergrid_x = math.ceil(line_set_a.shape[0] / threadsperblock[0])
blockspergrid_y = math.ceil(line_set_b.shape[0] / threadsperblock[1])
blockspergrid = (blockspergrid_x, blockspergrid_y)
cost_matrix = np.zeros((line_set_a.shape[0], line_set_b.shape[0]))
object_set_cost_kernel[blockspergrid, threadsperblock](line_set_a, line_set_b, cost_matrix)
return cost_matrix
def object_set_cost_cuda_mvs(line_set_a, line_set_b):
line_set_a_vals = np.array([l.value for l in line_set_a])
line_set_b_vals = np.array([l.value for l in line_set_b])
return object_set_cost_cuda_value(line_set_a_vals, line_set_b_vals)
@numba.cuda.jit(device=True)
def rotor_between_lines_device(L1, L2, rotor):
L21_val = numba.cuda.local.array(32, dtype=numba.float64)
L12_val = numba.cuda.local.array(32, dtype=numba.float64)
gp_device(L2, L1, L21_val)
gp_device(L1, L2, L12_val)
beta_val = numba.cuda.local.array(32, dtype=numba.float64)
K_val = numba.cuda.local.array(32, dtype=numba.float64)
for i in range(32):
K_val[i] = L21_val[i] + L12_val[i]
beta_val[i] = 0.0
K_val[0] += 2.0
project_val_cuda(K_val, beta_val, 4)
alpha = 2.0 * K_val[0]
denominator = math.sqrt(alpha / 2)
normalisation_val = numba.cuda.local.array(32, dtype=numba.float64)
output_val = numba.cuda.local.array(32, dtype=numba.float64)
for i in range(32):
if i == 0:
numerator_val = 1.0 - beta_val[i] / alpha
else:
numerator_val = -beta_val[i] / alpha
normalisation_val[i] = numerator_val / denominator
output_val[i] = L21_val[i]
output_val[0] += 1
gp_device(normalisation_val, output_val, rotor)
@numba.cuda.jit
def rotor_between_lines_kernel(value, other_value, output):
i = numba.cuda.grid(1)
if i < value.shape[0]:
rotor_between_lines_device(value[i, :], other_value[i, :], output[i, :])
@numba.cuda.jit(device=True)
def gp_mult_with_adjoint_to_scalar(value):
other_value = numba.cuda.local.array(32, dtype=numba.float64)
adjoint_device(value, other_value)
return value[0] * other_value[0] + value[3] * other_value[3] + value[4] * other_value[4] - value[5] * other_value[
5] - value[6] * other_value[6] - value[7] * other_value[7] - value[8] * other_value[8] + value[9] * other_value[
9] - value[10] * other_value[10] - value[11] * other_value[11] + value[12] * other_value[12] - value[
13] * other_value[13] + value[14] * other_value[14] + value[15] * other_value[15] + value[2] * \
other_value[2] - value[16] * other_value[16] + value[18] * other_value[18] - value[19] * other_value[19] + \
value[20] * other_value[20] + value[21] * other_value[21] - value[22] * other_value[22] + value[23] * \
other_value[23] + value[24] * other_value[24] + value[25] * other_value[25] + value[26] * other_value[26] - \
value[27] * other_value[27] - value[28] * other_value[28] - value[29] * other_value[29] - value[30] * \
other_value[30] - value[17] * other_value[17] + value[1] * other_value[1] - value[31] * other_value[31]
@numba.cuda.jit(device=True)
def gp_mult_with_adjoint(value, output):
other_value = numba.cuda.local.array(32, dtype=numba.float64)
adjoint_device(value, other_value)
gp_device(value, other_value, output)
@numba.cuda.jit(device=True)
def rotor_cost_device(R_val):
translation_val = numba.cuda.local.array(32, dtype=numba.float64)
rotation_val = numba.cuda.local.array(32, dtype=numba.float64)
ep_val = numba.cuda.local.array(32, dtype=numba.float64)
for i in range(32):
ep_val[i] = 0.0
ep_val[4] = 1.0
ip_device(R_val, ep_val, translation_val)
for i in range(32):
rotation_val[i] = R_val[i]
rotation_val[0] -= 1
a = abs(gp_mult_with_adjoint_to_scalar(rotation_val))
b = abs(gp_mult_with_adjoint_to_scalar(translation_val))
return a + b
@numba.cuda.jit(device=True)
def cost_line_to_line_device(L1, L2):
R_val = numba.cuda.local.array(32, dtype=numba.float64)
rotor_between_lines_device(L1, L2, R_val)
return rotor_cost_device(R_val)
@numba.cuda.jit
def cost_line_to_line_kernel(value, other_value, output):
# This does elementwise gp with the input arrays into the output array
i = numba.cuda.grid(1)
if i < value.shape[0]:
output[i] = cost_line_to_line_device(value[i, :], other_value[i, :])
@numba.cuda.jit
def line_set_cost_kernel(line_set_a, line_set_b, cost_matrix):
a_ind, b_ind = numba.cuda.grid(2)
if a_ind < line_set_a.shape[0]:
if b_ind < line_set_b.shape[0]:
cost_matrix[a_ind, b_ind] = cost_line_to_line_device(line_set_a[a_ind, :], line_set_b[b_ind, :])
def line_set_cost_cuda_value(line_set_a, line_set_b):
threadsperblock = (16, 16)
blockspergrid_x = math.ceil(line_set_a.shape[0] / threadsperblock[0])
blockspergrid_y = math.ceil(line_set_b.shape[0] / threadsperblock[1])
blockspergrid = (blockspergrid_x, blockspergrid_y)
cost_matrix = np.zeros((line_set_a.shape[0], line_set_b.shape[0]))
line_set_cost_kernel[blockspergrid, threadsperblock](line_set_a, line_set_b, cost_matrix)
return cost_matrix
def line_set_cost_cuda_mvs(line_set_a, line_set_b):
line_set_a_vals = np.array([l.value for l in line_set_a])
line_set_b_vals = np.array([l.value for l in line_set_b])
return line_set_cost_cuda_value(line_set_a_vals, line_set_b_vals)
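# Usage sketch (illustrative only): given two equal-length lists of conformal
# line multivectors built elsewhere with the clifford g3c tools, the helpers
# above can be combined roughly as follows.
#
#     cost_matrix = line_set_cost_cuda_mvs(lines_a, lines_b)
#     rotors, costs = sequential_rotor_estimation_cuda_mvs(
#         lines_a, lines_b, n_samples=100, n_objects_per_sample=5)
#     best_rotor = rotors[int(np.argmin(costs))]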
| arsenovic/clifford | clifford/tools/g3c/cuda.py | Python | bsd-3-clause | 20,805 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## @copyright
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, Jorge De La Cruz, Carmen Castano.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Jorge De La Cruz, Carmen Castano'
__copyright__ = 'Copyright (c) 2017 Jorge De La Cruz, Carmen Castano'
__license__ = 'BSD'
__maintainer__ = 'Jorge De La Cruz'
__email__ = '[email protected]'
import sys
## Path to FreeCAD library
# change this by your FreeCAD library path
sys.path.append('/usr/lib/freecad/lib')
import FreeCAD as App
import Import
from datetime import datetime
class GetParameters:
def __init__(self):
self.filePath = '/home/jdelacruz/Downloads/KonzeptB_lang090715.stp'
def loadCAD(self):
print('Starting to load the CAD file, please be patient!...')
Import.open(self.filePath)
self.handler = App.ActiveDocument
self.parts = self.handler.Objects
print('CAD model loaded!')
def writeTxt(self):
f = open('data.txt','a')
print >>f, 'Name \t Label'
self.i = 0
self.size = len(self.parts)
self.names = range(self.size)
self.labels = range(self.size)
for self.part in self.parts:
self.names[self.i] = self.part.Name
self.labels[self.i] = self.part.Label
print >>f, self.part.Name+"\t"+self.part.Label
self.i += 1
f.close()
print('The txt file has been created successfully!')
if __name__ == '__main__':
data = GetParameters()
data.loadCAD()
data.writeTxt()
| jdelacruz26/misccode | cad2xls.py | Python | bsd-3-clause | 2,955 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Fisher", sigma = 0.0, exog_count = 100, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_Fisher/trend_MovingMedian/cycle_0/ar_/test_artificial_1024_Fisher_MovingMedian_0__100.py | Python | bsd-3-clause | 266 |
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===========================
Upper Air Sounding Tutorial
===========================
Upper air analysis is a staple of many synoptic and mesoscale analysis
problems. In this tutorial we will gather weather balloon data, plot it,
perform a series of thermodynamic calculations, and summarize the results.
To learn more about the Skew-T diagram and its use in weather analysis and
forecasting, check out `this <https://homes.comet.ucar.edu/~alanbol/aws-tr-79-006.pdf>`_
air weather service guide.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Upper air data can be obtained using the siphon package, but for this tutorial we will use
# some of MetPy's sample data. This event is the Veterans Day tornado outbreak in 2002.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
##########################################################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.get_wind_components(wind_speed, wind_dir)
##########################################################################
# Thermodynamic Calculations
# --------------------------
#
# Often times we will want to calculate some thermodynamic parameters of a
# sounding. The MetPy calc module has many such calculations already implemented!
#
# * **Lifting Condensation Level (LCL)** - The level at which an air parcel's
# relative humidity becomes 100% when lifted along a dry adiabatic path.
# * **Parcel Path** - Path followed by a hypothetical parcel of air, beginning
# at the surface temperature/pressure and rising dry adiabatically until
#    reaching the LCL, then rising moist adiabatically.
# Calculate the LCL
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
##########################################################################
# Basic Skew-T Plotting
# ---------------------
#
# The Skew-T (log-P) diagram is the standard way to view rawinsonde data. The
# y-axis is height in pressure coordinates and the x-axis is temperature. The
# y coordinates are plotted on a logarithmic scale and the x coordinate system
# is skewed. An explanation of skew-T interpretation is beyond the scope of this
# tutorial, but here we will plot one that can be used for analysis or
# publication.
#
# The most basic skew-T can be plotted with only five lines of Python.
# These lines perform the following tasks:
#
# 1. Create a ``Figure`` object and set the size of the figure.
#
# 2. Create a ``SkewT`` object
#
# 3. Plot the pressure and temperature (note that the pressure,
# the independent variable, is first even though it is plotted on the y-axis).
#
# 4. Plot the pressure and dewpoint temperature.
#
# 5. Plot the wind barbs at the appropriate pressure using the u and v wind
# components.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()
##########################################################################
# Advanced Skew-T Plotting
# ------------------------
#
# Fiducial lines indicating dry adiabats, moist adiabats, and mixing ratio are
# useful when performing further analysis on the Skew-T diagram. Often the
# 0C isotherm is emphasized and areas of CAPE and CIN are shaded.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
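##########################################################################
# The shaded CAPE and CIN regions above can also be quantified. This is a
# small illustrative addition; it assumes ``mpcalc.cape_cin`` is available in
# the MetPy version used by this tutorial.
cape, cin = mpcalc.cape_cin(p, T, Td, parcel_prof)
print(cape, cin)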
##########################################################################
# Adding a Hodograph
# ------------------
#
# A hodograph is a polar representation of the wind profile measured by the rawinsonde.
# Winds at different levels are plotted as vectors with their tails at the origin, the angle
# from the vertical axes representing the direction, and the length representing the speed.
# The line plotted on the hodograph is a line connecting the tips of these vectors,
# which are not drawn.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner.
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed) # Plot a line colored by wind speed
# Show the plot
plt.show()
| metpy/MetPy | v0.8/_downloads/upperair_soundings.py | Python | bsd-3-clause | 7,536 |
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Deploy and install a package to a target
"""
import os
import sys
import zipfile
from qisys import ui
import qisys.command
import qisys.parsers
import qipkg.package
def configure_parser(parser):
qisys.parsers.default_parser(parser)
qisys.parsers.deploy_parser(parser)
parser.add_argument("pkg_path")
def do(args):
urls = qisys.parsers.get_deploy_urls(args)
pkg_path = args.pkg_path
for url in urls:
deploy(pkg_path, url)
def deploy(pkg_path, url):
ui.info(ui.green, "Deploying",
ui.reset, ui.blue, pkg_path,
ui.reset, ui.green, "to",
ui.reset, ui.blue, url.as_string)
pkg_name = qipkg.package.name_from_archive(pkg_path)
scp_cmd = ["scp",
pkg_path,
"%s@%s:" % (url.user, url.host)]
qisys.command.call(scp_cmd)
try:
_install_package(url, pkg_name, pkg_path)
except Exception as e:
ui.error("Unable to install package on target")
ui.error("Error was: ", e)
rm_cmd = ["ssh", "%s@%s" % (url.user, url.host),
"rm", os.path.basename(pkg_path)]
qisys.command.call(rm_cmd)
def _install_package(url, pkg_name, pkg_path):
import qi
app = qi.Application()
session = qi.Session()
session.connect("tcp://%s:9559" % (url.host))
package_manager = session.service("PackageManager")
package_manager.removePkg(pkg_name)
ret = package_manager.install(
"/home/%s/%s" % (url.user, os.path.basename(pkg_path)))
ui.info("PackageManager returned: ", ret)
| dmerejkowsky/qibuild | python/qipkg/actions/deploy_package.py | Python | bsd-3-clause | 1,732 |
"""
SMQTK Web Applications
"""
import inspect
import logging
import os
import flask
import smqtk.utils
from smqtk.utils import plugin
class SmqtkWebApp (flask.Flask, smqtk.utils.Configurable, plugin.Pluggable):
"""
Base class for SMQTK web applications
"""
@classmethod
def impl_directory(cls):
"""
:return: Directory in which this implementation is contained.
:rtype: str
"""
return os.path.dirname(os.path.abspath(inspect.getfile(cls)))
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
This should be overridden in each implemented application class to add
appropriate configuration.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
return {
"flask_app": {
"SECRET_KEY": "MySuperUltraSecret",
"BASIC_AUTH_USERNAME": "demo",
"BASIC_AUTH_PASSWORD": "demo"
},
"server": {
'host': "127.0.0.1",
'port': 5000
}
}
@classmethod
def from_config(cls, config_dict):
return cls(config_dict)
def __init__(self, json_config):
"""
Initialize application based of supplied JSON configuration
:param json_config: JSON configuration dictionary
:type json_config: dict
"""
super(SmqtkWebApp, self).__init__(
self.__class__.__name__,
static_folder=os.path.join(self.impl_directory(), 'static'),
template_folder=os.path.join(self.impl_directory(), 'templates')
)
#
# Configuration setup
#
self.json_config = json_config
# Factor 'flask_app' configuration properties into self.config
for k in self.json_config['flask_app']:
self.config[k] = self.json_config['flask_app'][k]
#
# Security
#
self.secret_key = self.config['SECRET_KEY']
def get_config(self):
return self.json_config
@property
def log(self):
return logging.getLogger('.'.join((self.__module__,
self.__class__.__name__)))
def run(self, host=None, port=None, debug=False, **options):
"""
Override of the run method, drawing running host and port from
configuration by default. 'host' and 'port' values specified as argument
or keyword will override the app configuration.
"""
super(SmqtkWebApp, self)\
.run(host=(host or self.json_config['server']['host']),
port=(port or self.json_config['server']['port']),
debug=debug,
**options)
def get_web_applications(reload_modules=False):
"""
Discover and return SmqtkWebApp implementation classes found in the plugin
directory. Keys in the returned map are the names of the discovered classes
and the paired values are the actual class type objects.
    We look for modules (directories or files) that start with an alphanumeric
character ('_' prefixed files/directories are hidden, but not recommended).
Within a module, we first look for a helper variable by the name
``APPLICATION_CLASS``, which can either be a single class object or
an iterable of class objects, to be exported. If the variable is set to
None, we skip that module and do not import anything. If the variable is not
    present, we look for a class by the same name and casing as the module's
    name. If neither is found, the module is skipped.
:param reload_modules: Explicitly reload discovered modules from source.
:type reload_modules: bool
:return: Map of discovered class objects of type ``SmqtkWebApp`` whose
keys are the string names of the classes.
:rtype: dict[str, type]
"""
import os
from smqtk.utils.plugin import get_plugins
this_dir = os.path.abspath(os.path.dirname(__file__))
env_var = "APPLICATION_PATH"
helper_var = "APPLICATION_CLASS"
return get_plugins(__name__, this_dir, env_var, helper_var, SmqtkWebApp,
reload_modules=reload_modules)
| kfieldho/SMQTK | python/smqtk/web/__init__.py | Python | bsd-3-clause | 4,457 |
from __future__ import unicode_literals
from django.db import models
from smartmin.models import SmartModel, ActiveManager
class Post(SmartModel):
title = models.CharField(max_length=128,
help_text="The title of this blog post, keep it relevant")
body = models.TextField(help_text="The body of the post, go crazy")
order = models.IntegerField(help_text="The order for this post, posts with smaller orders come first")
tags = models.CharField(max_length=128,
help_text="Any tags for this post")
objects = models.Manager()
active = ActiveManager()
@classmethod
def pre_create_instance(cls, field_dict):
field_dict['body'] = "Body: %s" % field_dict['body']
return field_dict
@classmethod
def prepare_fields(cls, field_dict, import_params=None, user=None):
field_dict['order'] = int(float(field_dict['order']))
return field_dict
@classmethod
def validate_import_header(cls, header):
if 'title' not in header:
raise Exception('missing "title" header')
def __unicode__(self):
return self.title
class Category(SmartModel):
name = models.SlugField(max_length=64, unique=True,
help_text="The name of this category")
| caktus/smartmin | test_runner/blog/models.py | Python | bsd-3-clause | 1,319 |
from __future__ import absolute_import
import pkgutil
import six
MODEL_MOVES = {
"sentry.models.tagkey.TagKey": "sentry.tagstore.legacy.models.tagkey.TagKey",
"sentry.models.tagvalue.tagvalue": "sentry.tagstore.legacy.models.tagvalue.TagValue",
"sentry.models.grouptagkey.GroupTagKey": "sentry.tagstore.legacy.models.grouptagkey.GroupTagKey",
"sentry.models.grouptagvalue.GroupTagValue": "sentry.tagstore.legacy.models.grouptagvalue.GroupTagValue",
"sentry.models.eventtag.EventTag": "sentry.tagstore.legacy.models.eventtag.EventTag",
}
class ModuleProxyCache(dict):
def __missing__(self, key):
if "." not in key:
return __import__(key)
module_name, class_name = key.rsplit(".", 1)
module = __import__(module_name, {}, {}, [class_name])
handler = getattr(module, class_name)
        # Cache the resolved handler so repeated lookups skip the import machinery
self[key] = handler
return handler
_cache = ModuleProxyCache()
def import_string(path):
"""
Path must be module.path.ClassName
>>> cls = import_string('sentry.models.Group')
"""
path = MODEL_MOVES.get(path, path)
result = _cache[path]
return result
def import_submodules(context, root_module, path):
"""
Import all submodules and register them in the ``context`` namespace.
>>> import_submodules(locals(), __name__, __path__)
"""
for loader, module_name, is_pkg in pkgutil.walk_packages(path, root_module + "."):
# this causes a Runtime error with model conflicts
# module = loader.find_module(module_name).load_module(module_name)
module = __import__(module_name, globals(), locals(), ["__name__"])
for k, v in six.iteritems(vars(module)):
if not k.startswith("_"):
context[k] = v
context[module_name] = module
| mvaled/sentry | src/sentry/utils/imports.py | Python | bsd-3-clause | 1,880 |
from pyelectro import analysis as pye_analysis
from matplotlib import pyplot
file_name = "100pA_1a.csv"
t, v = pye_analysis.load_csv_data(file_name)
analysis_var = {
"peak_delta": 0.1,
"baseline": 0,
"dvdt_threshold": 2,
"peak_threshold": 0,
}
analysis = pye_analysis.IClampAnalysis(
v, t, analysis_var, start_analysis=150, end_analysis=900
)
res = analysis.analyse()
print(res)
pyplot.plot(t, v)
pyplot.suptitle("Data read in from: %s" % file_name)
pyplot.show()
| NeuralEnsemble/neurotune | examples/example_1/data_analysis.py | Python | bsd-3-clause | 489 |
import json
from django.test.utils import override_settings
import pytest
from pyquery import PyQuery
from fjord.base import views
from fjord.base.tests import (
LocalizingClient,
TestCase,
AnalyzerProfileFactory,
reverse
)
from fjord.base.views import IntentionalException
from fjord.search.tests import ElasticTestCase
class TestAbout(TestCase):
client_class = LocalizingClient
def test_about_view(self):
resp = self.client.get(reverse('about-view'))
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'about.html')
class TestLoginFailure(TestCase):
def test_login_failure_view(self):
resp = self.client.get(reverse('login-failure'))
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'login_failure.html')
resp = self.client.get(reverse('login-failure'), {'mobile': 1})
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'mobile/login_failure.html')
# Note: This needs to be an ElasticTestCase because the view does ES
# stuff.
class TestMonitorView(ElasticTestCase):
def test_monitor_view(self):
"""Tests for the monitor view."""
# TODO: When we add a mocking framework, we can mock this
# properly.
test_memcached = views.test_memcached
try:
with self.settings(
SHOW_STAGE_NOTICE=True,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', # noqa
'LOCATION': ['localhost:11211', 'localhost2:11211']
}
}):
# Mock the test_memcached function so it always returns
# True.
views.test_memcached = lambda host, port: True
# TODO: Replace when we get a mock library.
def mock_rabbitmq():
class MockRabbitMQ(object):
def connect(self):
return True
return lambda *a, **kw: MockRabbitMQ()
views.establish_connection = mock_rabbitmq()
# Request /services/monitor and make sure it returns
# HTTP 200 and that there aren't errors on the page.
resp = self.client.get(reverse('services-monitor'))
errors = [line for line in resp.content.splitlines()
if 'ERROR' in line]
assert resp.status_code == 200, '%s != %s (%s)' % (
resp.status_code, 200, repr(errors))
finally:
views.test_memcached = test_memcached
class TestFileNotFound(TestCase):
client_class = LocalizingClient
def test_404(self):
request = self.client.get('/a/path/that/should/never/exist')
assert request.status_code == 404
self.assertTemplateUsed(request, '404.html')
class TestServerError(TestCase):
@override_settings(SHOW_STAGE_NOTICE=True)
def test_500(self):
with pytest.raises(IntentionalException):
self.client.get('/services/throw-error')
class TestRobots(TestCase):
def test_robots(self):
resp = self.client.get('/robots.txt')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'robots.txt')
class TestContribute(TestCase):
def test_contribute(self):
resp = self.client.get('/contribute.json')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'contribute.json')
def test_contribute_if_valid_json(self):
resp = self.client.get('/contribute.json')
# json.loads throws a ValueError when contribute.json is invalid JSON.
json.loads(resp.content)
class TestNewUserView(ElasticTestCase):
def setUp(self):
super(TestNewUserView, self).setUp()
jane = AnalyzerProfileFactory().user
self.jane = jane
def test_redirect_to_dashboard_if_anonymous(self):
# AnonymousUser shouldn't get to the new-user-view, so make
# sure they get redirected to the dashboard.
resp = self.client.get(reverse('new-user-view'), follow=True)
assert resp.status_code == 200
self.assertTemplateNotUsed('new_user.html')
self.assertTemplateUsed('analytics/dashboard.html')
def test_default_next_url(self):
self.client_login_user(self.jane)
resp = self.client.get(reverse('new-user-view'))
assert resp.status_code == 200
self.assertTemplateUsed('new_user.html')
# Pull out next link
pq = PyQuery(resp.content)
next_url = pq('#next-url-link')
assert next_url.attr['href'] == '/en-US/' # this is the dashboard
def test_valid_next_url(self):
self.client_login_user(self.jane)
url = reverse('new-user-view')
resp = self.client.get(url, {
'next': '/ou812' # stretches the meaning of 'valid'
})
assert resp.status_code == 200
self.assertTemplateUsed('new_user.html')
        # Pull out next link; '/ou812' is treated as a valid relative url,
        # so it should have been kept as-is.
pq = PyQuery(resp.content)
next_url = pq('#next-url-link')
assert next_url.attr['href'] == '/ou812'
def test_sanitized_next_url(self):
self.client_login_user(self.jane)
url = reverse('new-user-view')
resp = self.client.get(url, {
'next': 'javascript:prompt%28document.cookie%29'
})
assert resp.status_code == 200
self.assertTemplateUsed('new_user.html')
# Pull out next link which is naughty, so it should have been
# replaced with a dashboard link.
pq = PyQuery(resp.content)
next_url = pq('#next-url-link')
assert next_url.attr['href'] == '/en-US/' # this is the dashboard
| Ritsyy/fjord | fjord/base/tests/test_views.py | Python | bsd-3-clause | 5,907 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_i3wm-formula
----------------------------------
Tests for `i3wm-formula` module.
"""
import unittest
from i3wm_formula import i3wm_formula
class TestI3wmFormula(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() | westurner/i3wm-formula | tests/test_i3wm-formula.py | Python | bsd-3-clause | 407 |
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E08000032'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsvJune2017.tsv'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsvJune2017.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_bradford.py | Python | bsd-3-clause | 408 |
import os
import pytest
from datetime import time
from sport_systems import stats
BASE_DIR = os.path.dirname(__file__)
@pytest.fixture
def response_1():
path = os.path.join(BASE_DIR, 'response-1.xml')
with open(path, 'rb') as fin:
data = fin.read()
return data
@pytest.fixture
def results_1():
return [
stats.Result(time=time(1, 10, 15), name='foo'),
stats.Result(time=time(1, 10, 20), name='foo'),
stats.Result(time=time(1, 20), name='foo'),
stats.Result(time=time(1, 30, 10), name='foo'),
stats.Result(time=time(1, 30, 15), name='foo'),
stats.Result(time=time(1, 30, 20), name='foo'),
]
| willcodefortea/sportssystems_crawler | tests/conftest.py | Python | bsd-3-clause | 672 |
from django.test import TestCase
from mock import Mock, patch
from paymentexpress.facade import Facade
from paymentexpress.gateway import AUTH, PURCHASE
from paymentexpress.models import OrderTransaction
from tests import (XmlTestingMixin, CARD_VISA, SAMPLE_SUCCESSFUL_RESPONSE,
SAMPLE_DECLINED_RESPONSE, SAMPLE_ERROR_RESPONSE)
from oscar.apps.payment.utils import Bankcard
from oscar.apps.payment.exceptions import (UnableToTakePayment,
InvalidGatewayRequestError)
class MockedResponseTestCase(TestCase):
def create_mock_response(self, body, status_code=200):
response = Mock()
response.content = body
response.text = body
response.status_code = status_code
return response
class FacadeTests(TestCase, XmlTestingMixin):
def setUp(self):
self.facade = Facade()
def test_zero_amount_raises_exception(self):
card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
with self.assertRaises(UnableToTakePayment):
self.facade.authorise('1000', 0, card)
def test_zero_amount_for_complete_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.complete('1000', 0, '1234')
def test_zero_amount_for_purchase_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 0)
def test_purchase_without_billing_id_or_card_raises_exception(self):
with self.assertRaises(ValueError):
self.facade.purchase('1000', 1.23)
def test_zero_amount_for_refund_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.refund('1000', 0, '1234')
def test_merchant_reference_format(self):
merchant_ref = self.facade._get_merchant_reference('1000', AUTH)
self.assertRegexpMatches(merchant_ref, r'^\d+_[A-Z]+_\d+_\d{4}$')
class FacadeSuccessfulResponseTests(MockedResponseTestCase):
dps_txn_ref = '000000030884cdc6'
dps_billing_id = '0000080023225598'
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_successful_call_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
auth_dict = self.facade.authorise('1000', 1, self.card)
complete_dict = self.facade.complete('1000', 1.23,
self.dps_txn_ref)
refund_dict = self.facade.refund('1000', 1.23, '000000030884cdc6')
validate_dict = self.facade.validate(self.card)
response_dicts = (auth_dict, complete_dict, refund_dict,
validate_dict)
for response_dict in response_dicts:
self.assertEquals(self.dps_txn_ref,
response_dict['txn_reference'])
self.assertEquals(self.dps_billing_id,
response_dict['partner_reference'])
def test_purchase_with_billing_id_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
txn_ref = self.facade.purchase('1000', 1.23, 'abc123')
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
def test_purchase_with_bankcard_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
txn_ref = self.facade.purchase('1000', 1.23, None, self.card)
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
def test_successful_call_is_recorded(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
self.facade.authorise('10001', 10.25, self.card)
txn = OrderTransaction.objects.filter(order_number='10001')[0]
self.assertEquals(AUTH, txn.txn_type)
def test_empty_issue_date_is_allowed(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123")
txn_ref = self.facade.authorise('1000', 1.23, card)
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
class FacadeDeclinedResponseTests(MockedResponseTestCase):
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_declined_call_raises_an_exception(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_DECLINED_RESPONSE)
with self.assertRaises(UnableToTakePayment):
self.facade.authorise('1000', 1, self.card)
with self.assertRaises(UnableToTakePayment):
self.facade.complete('1000', 1.23, '000000030884cdc6')
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 1.23, 'abc123')
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 1.23, None, self.card)
with self.assertRaises(UnableToTakePayment):
self.facade.refund('1000', 1.23, '000000030884cdc6')
with self.assertRaises(UnableToTakePayment):
self.facade.validate(self.card)
def test_declined_call_is_recorded(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_DECLINED_RESPONSE)
try:
self.facade.purchase('1001', 10.24, None, self.card)
except Exception:
pass
txn = OrderTransaction.objects.filter(order_number='1001')[0]
self.assertIsNotNone(txn)
self.assertEquals(PURCHASE, txn.txn_type)
class FacadeErrorResponseTests(MockedResponseTestCase):
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_error_response_raises_invalid_gateway_request_exception(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_ERROR_RESPONSE)
with self.assertRaises(InvalidGatewayRequestError):
self.facade.purchase('1000', 10.24, None, self.card)
| django-oscar/django-oscar-paymentexpress | tests/facade_tests.py | Python | bsd-3-clause | 7,334 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import os.path
import sys
from genshi.builder import tag
from trac.admin import IAdminCommandProvider, IAdminPanelProvider
from trac.config import ListOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.util import as_bool, is_path_below
from trac.util.compat import any
from trac.util.text import breakable_path, normalize_whitespace, print_table, \
printout
from trac.util.translation import _, ngettext, tag_
from trac.versioncontrol import DbRepositoryProvider, RepositoryManager, \
is_default
from trac.web.chrome import Chrome, add_notice, add_warning
class VersionControlAdmin(Component):
"""trac-admin command provider for version control administration."""
implements(IAdminCommandProvider, IPermissionRequestor)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('changeset added', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets added to a repository
This command should be called from a post-commit hook. It will
trigger a cache update and notify components about the addition.
""",
self._complete_repos, self._do_changeset_added)
yield ('changeset modified', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets modified in a repository
               This command should be called from a post-revprop-change hook after
revision properties like the commit message, author or date
have been changed. It will trigger a cache update for the given
revisions and notify components about the change.
""",
self._complete_repos, self._do_changeset_modified)
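        # As an illustration only (the environment path and repository name
        # below are hypothetical), a Subversion post-commit hook would
        # typically run:
        #
        #   trac-admin /path/to/env changeset added "(default)" "$REV"
        #
        # and a post-revprop-change hook would run the 'changeset modified'
        # variant with the affected revision.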
yield ('repository list', '',
'List source repositories',
None, self._do_list)
yield ('repository resync', '<repos> [rev]',
"""Re-synchronize trac with repositories
When [rev] is specified, only that revision is synchronized.
Otherwise, the complete revision history is synchronized. Note
that this operation can take a long time to complete.
If synchronization gets interrupted, it can be resumed later
using the `sync` command.
To synchronize all repositories, specify "*" as the repository.
""",
self._complete_repos, self._do_resync)
yield ('repository sync', '<repos> [rev]',
"""Resume synchronization of repositories
Similar to `resync`, but doesn't clear the already synchronized
changesets. Useful for resuming an interrupted `resync`.
To synchronize all repositories, specify "*" as the repository.
""",
self._complete_repos, self._do_sync)
def get_reponames(self):
rm = RepositoryManager(self.env)
return [reponame or '(default)' for reponame
in rm.get_all_repositories()]
def _complete_repos(self, args):
if len(args) == 1:
return self.get_reponames()
def _do_changeset_added(self, reponame, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
rm.notify('changeset_added', reponame, revs)
def _do_changeset_modified(self, reponame, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
rm.notify('changeset_modified', reponame, revs)
def _do_list(self):
rm = RepositoryManager(self.env)
values = []
for (reponame, info) in sorted(rm.get_all_repositories().iteritems()):
alias = ''
if 'alias' in info:
alias = info['alias'] or '(default)'
values.append((reponame or '(default)', info.get('type', ''),
alias, info.get('dir', '')))
print_table(values, [_('Name'), _('Type'), _('Alias'), _('Directory')])
def _sync(self, reponame, rev, clean):
rm = RepositoryManager(self.env)
if reponame == '*':
if rev is not None:
raise TracError(_('Cannot synchronize a single revision '
'on multiple repositories'))
repositories = rm.get_real_repositories()
else:
if is_default(reponame):
reponame = ''
repos = rm.get_repository(reponame)
if repos is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=reponame or '(default)'))
if rev is not None:
repos.sync_changeset(rev)
printout(_('%(rev)s resynced on %(reponame)s.', rev=rev,
reponame=repos.reponame or '(default)'))
return
repositories = [repos]
db = self.env.get_db_cnx()
for repos in sorted(repositories, key=lambda r: r.reponame):
printout(_('Resyncing repository history for %(reponame)s... ',
reponame=repos.reponame or '(default)'))
repos.sync(self._sync_feedback, clean=clean)
cursor = db.cursor()
cursor.execute("SELECT count(rev) FROM revision WHERE repos=%s",
(repos.id,))
for cnt, in cursor:
printout(ngettext('%(num)s revision cached.',
'%(num)s revisions cached.', num=cnt))
printout(_('Done.'))
def _sync_feedback(self, rev):
sys.stdout.write(' [%s]\r' % rev)
sys.stdout.flush()
def _do_resync(self, reponame, rev=None):
self._sync(reponame, rev, clean=True)
def _do_sync(self, reponame, rev=None):
self._sync(reponame, rev, clean=False)
# IPermissionRequestor methods
def get_permission_actions(self):
return [('VERSIONCONTROL_ADMIN', ['BROWSER_VIEW', 'CHANGESET_VIEW',
'FILE_VIEW', 'LOG_VIEW'])]
class RepositoryAdminPanel(Component):
"""Web admin panel for repository administration."""
implements(IAdminPanelProvider)
allowed_repository_dir_prefixes = ListOption('versioncontrol',
'allowed_repository_dir_prefixes', '',
doc="""Comma-separated list of allowed prefixes for repository
directories when adding and editing repositories in the repository
admin panel. If the list is empty, all repository directories are
allowed. (''since 0.12.1'')""")
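    # In trac.ini this option lives in the [versioncontrol] section, e.g.
    # (the directories shown are illustrative only):
    #
    #   [versioncontrol]
    #   allowed_repository_dir_prefixes = /srv/svn, /srv/git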
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'VERSIONCONTROL_ADMIN' in req.perm:
yield ('versioncontrol', _('Version Control'), 'repository',
_('Repositories'))
def render_admin_panel(self, req, category, page, path_info):
req.perm.require('VERSIONCONTROL_ADMIN')
# Retrieve info for all repositories
rm = RepositoryManager(self.env)
all_repos = rm.get_all_repositories()
db_provider = self.env[DbRepositoryProvider]
if path_info:
# Detail view
reponame = not is_default(path_info) and path_info or ''
info = all_repos.get(reponame)
if info is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=path_info))
if req.method == 'POST':
if req.args.get('cancel'):
req.redirect(req.href.admin(category, page))
elif db_provider and req.args.get('save'):
# Modify repository
changes = {}
for field in db_provider.repository_attrs:
value = normalize_whitespace(req.args.get(field))
if (value is not None or field == 'hidden') \
and value != info.get(field):
changes[field] = value
if 'dir' in changes \
and not self._check_dir(req, changes['dir']):
changes = {}
if changes:
db_provider.modify_repository(reponame, changes)
add_notice(req, _('Your changes have been saved.'))
name = req.args.get('name')
resync = tag.tt('trac-admin $ENV repository resync "%s"'
% (name or '(default)'))
if 'dir' in changes:
msg = tag_('You should now run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
elif 'type' in changes:
msg = tag_('You may have to run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
if name and name != path_info and not 'alias' in info:
cset_added = tag.tt('trac-admin $ENV changeset '
'added "%s" $REV'
% (name or '(default)'))
msg = tag_('You will need to update your post-commit '
'hook to call %(cset_added)s with the new '
'repository name.', cset_added=cset_added)
add_notice(req, msg)
if changes:
req.redirect(req.href.admin(category, page))
Chrome(self.env).add_wiki_toolbars(req)
data = {'view': 'detail', 'reponame': reponame}
else:
# List view
if req.method == 'POST':
# Add a repository
if db_provider and req.args.get('add_repos'):
name = req.args.get('name')
type_ = req.args.get('type')
# Avoid errors when copy/pasting paths
dir = normalize_whitespace(req.args.get('dir', ''))
if name is None or type_ is None or not dir:
add_warning(req, _('Missing arguments to add a '
'repository.'))
elif self._check_dir(req, dir):
db_provider.add_repository(name, dir, type_)
name = name or '(default)'
add_notice(req, _('The repository "%(name)s" has been '
'added.', name=name))
resync = tag.tt('trac-admin $ENV repository resync '
'"%s"' % name)
msg = tag_('You should now run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
cset_added = tag.tt('trac-admin $ENV changeset '
'added "%s" $REV' % name)
msg = tag_('You should also set up a post-commit hook '
'on the repository to call %(cset_added)s '
'for each committed changeset.',
cset_added=cset_added)
add_notice(req, msg)
req.redirect(req.href.admin(category, page))
# Add a repository alias
elif db_provider and req.args.get('add_alias'):
name = req.args.get('name')
alias = req.args.get('alias')
if name is not None and alias is not None:
db_provider.add_alias(name, alias)
add_notice(req, _('The alias "%(name)s" has been '
'added.', name=name or '(default)'))
req.redirect(req.href.admin(category, page))
add_warning(req, _('Missing arguments to add an '
'alias.'))
# Refresh the list of repositories
elif req.args.get('refresh'):
req.redirect(req.href.admin(category, page))
# Remove repositories
elif db_provider and req.args.get('remove'):
sel = req.args.getlist('sel')
if sel:
for name in sel:
db_provider.remove_repository(name)
add_notice(req, _('The selected repositories have '
'been removed.'))
req.redirect(req.href.admin(category, page))
add_warning(req, _('No repositories were selected.'))
data = {'view': 'list'}
# Find repositories that are editable
db_repos = {}
if db_provider is not None:
db_repos = dict(db_provider.get_repositories())
# Prepare common rendering data
repositories = dict((reponame, self._extend_info(reponame, info.copy(),
reponame in db_repos))
for (reponame, info) in all_repos.iteritems())
types = sorted([''] + rm.get_supported_types())
data.update({'types': types, 'default_type': rm.repository_type,
'repositories': repositories})
return 'admin_repositories.html', data
def _extend_info(self, reponame, info, editable):
"""Extend repository info for rendering."""
info['name'] = reponame
if info.get('dir') is not None:
info['prettydir'] = breakable_path(info['dir']) or ''
info['hidden'] = as_bool(info.get('hidden'))
info['editable'] = editable
if not info.get('alias'):
try:
repos = RepositoryManager(self.env).get_repository(reponame)
youngest_rev = repos.get_youngest_rev()
info['rev'] = youngest_rev
info['display_rev'] = repos.display_rev(youngest_rev)
except Exception:
pass
return info
def _check_dir(self, req, dir):
"""Check that a repository directory is valid, and add a warning
message if not.
"""
if not os.path.isabs(dir):
add_warning(req, _('The repository directory must be an absolute '
'path.'))
return False
prefixes = [os.path.join(self.env.path, prefix)
for prefix in self.allowed_repository_dir_prefixes]
if prefixes and not any(is_path_below(dir, prefix)
for prefix in prefixes):
add_warning(req, _('The repository directory must be located '
'below one of the following directories: '
'%(dirs)s', dirs=', '.join(prefixes)))
return False
return True
| zjj/trac_hack | trac/versioncontrol/admin.py | Python | bsd-3-clause | 16,185 |
from acoustics.decibel import *
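# dbsum combines levels energetically, i.e. L = 10 * log10(sum(10 ** (L_i / 10))),
# so two 10 dB levels give 10 * log10(2 * 10 ** 1) ~= 13.0103 dB, which is the
# reference value asserted below.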
def test_dbsum():
assert(abs(dbsum([10.0, 10.0]) - 13.0103) < 1e-5)
def test_dbmean():
assert(dbmean([10.0, 10.0]) == 10.0)
def test_dbadd():
assert(abs(dbadd(10.0, 10.0) - 13.0103) < 1e-5)
def test_dbsub():
assert(abs(dbsub(13.0103, 10.0) - 10.0) < 1e-5)
def test_dbmul():
assert(abs(dbmul(10.0, 2) - 13.0103) < 1e-5)
def test_dbdiv():
assert(abs(dbdiv(13.0103, 2) - 10.0) < 1e-5) | FRidh/python-acoustics | tests/test_decibel.py | Python | bsd-3-clause | 451 |
from djangothis.app import read_yaml, read_yaml_file, watchfile
| amitu/djangothis | djangothis/__init__.py | Python | bsd-3-clause | 64 |
# coding: utf-8
# Copyright 2015 rpaas authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from rpaas import plan, storage
class MongoDBStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage = storage.MongoDBStorage()
self.storage.db[self.storage.quota_collection].remove()
self.storage.db[self.storage.plans_collection].remove()
self.storage.db[self.storage.plans_collection].insert(
{"_id": "small",
"description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
)
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}}
)
def test_set_team_quota(self):
q = self.storage.set_team_quota("myteam", 8)
used, quota = self.storage.find_team_quota("myteam")
self.assertEqual([], used)
self.assertEqual(8, quota)
self.assertEqual(used, q["used"])
self.assertEqual(quota, q["quota"])
def test_list_plans(self):
plans = self.storage.list_plans()
expected = [
{"name": "small", "description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}},
{"name": "huge", "description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}},
]
self.assertEqual(expected, [p.to_dict() for p in plans])
def test_find_plan(self):
plan = self.storage.find_plan("small")
expected = {"name": "small", "description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
self.assertEqual(expected, plan.to_dict())
with self.assertRaises(storage.PlanNotFoundError):
self.storage.find_plan("something that doesn't exist")
def test_store_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
got_plan = self.storage.find_plan(p.name)
self.assertEqual(p.to_dict(), got_plan.to_dict())
def test_store_plan_duplicate(self):
p = plan.Plan(name="small", description="small thing",
config={"serviceofferingid": "abcdef123"})
with self.assertRaises(storage.DuplicateError):
self.storage.store_plan(p)
def test_update_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.update_plan(p.name, description="wat?",
config={"serviceofferingid": "abcdef123459"})
p = self.storage.find_plan(p.name)
self.assertEqual("super_huge", p.name)
self.assertEqual("wat?", p.description)
self.assertEqual({"serviceofferingid": "abcdef123459"}, p.config)
def test_update_plan_partial(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.update_plan(p.name, config={"serviceofferingid": "abcdef123459"})
p = self.storage.find_plan(p.name)
self.assertEqual("super_huge", p.name)
self.assertEqual("very huge thing", p.description)
self.assertEqual({"serviceofferingid": "abcdef123459"}, p.config)
def test_update_plan_not_found(self):
with self.assertRaises(storage.PlanNotFoundError):
self.storage.update_plan("my_plan", description="woot")
def test_delete_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.delete_plan(p.name)
with self.assertRaises(storage.PlanNotFoundError):
self.storage.find_plan(p.name)
def test_delete_plan_not_found(self):
with self.assertRaises(storage.PlanNotFoundError):
self.storage.delete_plan("super_huge")
def test_instance_metadata_storage(self):
self.storage.store_instance_metadata("myinstance", plan="small")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertEqual({"_id": "myinstance",
"plan": "small"}, inst_metadata)
self.storage.store_instance_metadata("myinstance", plan="medium")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertEqual({"_id": "myinstance", "plan": "medium"}, inst_metadata)
self.storage.remove_instance_metadata("myinstance")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertIsNone(inst_metadata)
| vfiebig/rpaas | tests/test_storage.py | Python | bsd-3-clause | 5,053 |
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from contextlib import contextmanager
from datetime import date, datetime, time
from functools import partial
import re
from typing import Iterator, Optional, Union, overload
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy # noqa: F811
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors="ignore", **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
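# A minimal usage sketch (the table and connection below are hypothetical):
#
#   import sqlite3
#   conn = sqlite3.connect(":memory:")
#   execute("CREATE TABLE t (a INTEGER)", conn)
#   execute("INSERT INTO t VALUES (?)", conn, params=(1,))
#   rows = execute("SELECT a FROM t", conn).fetchall()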
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
        A database URI could be provided as a str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError(
"read_sql_table only supported for SQLAlchemy connectable."
)
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError as err:
raise ValueError(f"Table {table_name} not found") from err
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection), database str URI,
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
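
    Examples
    --------
    A minimal sketch against an in-memory SQLite database (the table and
    column names below are illustrative only):

    >>> import sqlite3
    >>> conn = sqlite3.connect(":memory:")  # doctest:+SKIP
    >>> df = pd.DataFrame({"a": [1, 2]})  # doctest:+SKIP
    >>> df.to_sql("my_table", conn, index=False)  # doctest:+SKIP
    >>> pd.read_sql_query("SELECT a FROM my_table", conn)  # doctest:+SKIP
       a
    0  1
    1  2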
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable (engine/connection) or database str URI
or DBAPI2 connection (fallback mode).
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
def to_sql(
frame,
name,
con,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame, Series
name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 fallback mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
- 'multi': Pass multiple values in a single ``INSERT`` clause.
- callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if if_exists not in ("fail", "replace", "append"):
raise ValueError(f"'{if_exists}' is not valid for if_exists")
pandas_sql = pandasSQL_builder(con, schema=schema)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
"'frame' argument should be either a Series or a DataFrame"
)
pandas_sql.to_sql(
frame,
name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
chunksize=chunksize,
dtype=dtype,
method=method,
)
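# A callable passed as ``method`` receives the SQLTable wrapper, the
# connection, the column names and an iterator of row tuples. A minimal
# sketch mirroring the default insert (the function name is hypothetical):
#
#   def insert_rows(pd_table, conn, keys, data_iter):
#       data = [dict(zip(keys, row)) for row in data_iter]
#       conn.execute(pd_table.table.insert(), data)
#
#   to_sql(df, "my_table", engine, method=insert_rows)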
def has_table(table_name, con, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
else it just return con without modifying it.
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, str):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name,
pandas_sql_engine,
frame=None,
index=True,
if_exists="fail",
prefix="pandas",
index_label=None,
schema=None,
keys=None,
dtype=None,
):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data)
def _execute_insert_multi(self, conn, keys, data_iter):
"""
        Alternative to _execute_insert for DBs that support multivalue INSERT.
Note: multi-value insert is usually faster for analytics DBs
and tables containing a few columns
but performance degrades quickly with increase of columns.
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(data))
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
for i, (_, ser) in enumerate(temp.items()):
vals = ser._values
if vals.dtype.kind == "M":
d = vals.to_pydatetime()
elif vals.dtype.kind == "m":
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = vals.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
data_list[i] = d
return column_names, data_list
def insert(self, chunksize=None, method=None):
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
exec_insert(conn, keys, chunk_iter)
def _query_iterator(
self, result, chunksize, columns, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
else:
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return [
l if l is not None else f"level_{i}"
for i, l in enumerate(self.frame.index.names)
]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
        # At this point, attach to a new MetaData object; only attach to
        # self.meta once the table has been created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
        Booleans are hard because converting a bool column with None
        replaces all Nones with False. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (
col_type is datetime
or col_type is date
or col_type is DatetimeTZDtype
):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype("int64") or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
BigInteger,
Integer,
Float,
Text,
Boolean,
DateTime,
Date,
Time,
TIMESTAMP,
)
if col_type == "datetime64" or col_type == "datetime":
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
if col.dt.tz is not None:
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
# GH 26761 or an Index with date-like data e.g. 9999-01-01
if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
return BigInteger
elif col_type == "floating":
if col.dtype == "float32":
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == "integer":
if col.dtype == "int32":
return Integer
else:
return BigInteger
elif col_type == "boolean":
return Boolean
elif col_type == "date":
return Date
elif col_type == "time":
return Time
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype("int64")
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
def to_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, "execute"):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execution_options(no_parameters=True).execute(
*args, **kwargs
)
def read_table(
self,
table_name,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
schema=None,
chunksize=None,
):
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
@staticmethod
def _query_iterator(
result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
def read_query(
self,
sql,
index_col=None,
coerce_float=True,
parse_dates=None,
params=None,
chunksize=None,
):
"""
Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. psycopg2 uses the %(name)s style, so use params={'name': 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
              :func:`pandas.to_datetime`. Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
return frame
read_sql = read_query
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optional; specifies the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
        method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
schema=schema,
dtype=dtype,
)
table.create()
table.insert(chunksize, method=method)
if not name.isdigit() and not name.islower():
            # check for potential case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema, connection=conn
)
if name not in table_names:
msg = (
f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table, name, schema or self.meta.schema
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get(".".join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(
table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
)
return str(table.sql_schema())
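# A hedged end-to-end sketch for SQLDatabase, illustrative only and not part of
# the original module. It assumes SQLAlchemy is installed, uses an in-memory
# SQLite engine, and the table name "demo" is made up.
def _example_sqldatabase_roundtrip():  # illustrative only
    from sqlalchemy import create_engine

    db = SQLDatabase(create_engine("sqlite:///:memory:"))
    db.to_sql(DataFrame({"a": [1, 2]}), "demo", if_exists="replace", index=False)
    return db.read_query("SELECT a FROM demo")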
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
"string": "TEXT",
"floating": "REAL",
"integer": "INTEGER",
"datetime": "TIMESTAMP",
"date": "DATE",
"time": "TIME",
"boolean": "INTEGER",
}
def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError as err:
raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
return uname
def _get_valid_sqlite_name(name):
# See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
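# Illustrative examples of the escaping rules implemented above (not part of
# the original source); the identifiers are made up.
#   _get_valid_sqlite_name('col')           -> '"col"'
#   _get_valid_sqlite_name('weird "col"')   -> '"weird ""col"""'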
_SAFE_NAMES_WARNING = (
"The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to underscores."
)
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super().__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows):
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows))
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
def _execute_insert_multi(self, conn, keys, data_iter):
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
pat = re.compile(r"\s+")
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
create_stmts = [
"CREATE TABLE "
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
                + " ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
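# A hedged illustration (not part of the original source) of the SQL that
# SQLiteTable.insert_statement generates for a hypothetical two-column frame
# written without its index ("demo", "a" and "b" are made-up names):
#   num_rows=1 -> INSERT INTO "demo" ("a","b") VALUES (?,?)
#   num_rows=2 -> INSERT INTO "demo" ("a","b") VALUES (?,?),(?,?)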
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, is_cursor=False):
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
def read_query(
self,
sql,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize=None,
):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optional; specifies the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
table.insert(chunksize, method)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(
table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
)
return str(table.sql_schema())
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
        columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
        Optional; specifies the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
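# A hedged usage sketch for get_schema, illustrative only and not part of the
# original module; the frame contents and the table name "demo" are made up.
def _example_get_schema():  # illustrative only
    import sqlite3

    frame = DataFrame({"a": [1, 2], "b": ["x", "y"]})
    con = sqlite3.connect(":memory:")
    # With a DBAPI2 connection the sqlite3 fallback types (_SQL_TYPES) are used.
    return get_schema(frame, "demo", con=con)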
| TomAugspurger/pandas | pandas/io/sql.py | Python | bsd-3-clause | 62,333 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.common.net.buildbot.buildbot_mock import MockBuildBot
from webkitpy.common.net.web_mock import MockWeb
from webkitpy.common.system.systemhost_mock import MockSystemHost
# New-style ports need to move down into webkitpy.common.
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem
class MockHost(MockSystemHost):
def __init__(self, log_executive=False, executive_throws_when_run=None, initialize_scm_by_default=True, web=None, scm=None):
MockSystemHost.__init__(self, log_executive, executive_throws_when_run)
add_unit_tests_to_mock_filesystem(self.filesystem)
self.web = web or MockWeb()
self._scm = scm
# FIXME: we should never initialize the SCM by default, since the real
# object doesn't either. This has caused at least one bug (see bug 89498).
if initialize_scm_by_default:
self.initialize_scm()
self.buildbot = MockBuildBot()
# Note: We're using a real PortFactory here. Tests which don't wish to depend
# on the list of known ports should override this with a MockPortFactory.
self.port_factory = PortFactory(self)
def initialize_scm(self, patch_directories=None):
if not self._scm:
self._scm = MockSCM(filesystem=self.filesystem, executive=self.executive)
# Various pieces of code (wrongly) call filesystem.chdir(checkout_root).
# Making the checkout_root exist in the mock filesystem makes that chdir not raise.
self.filesystem.maybe_make_directory(self._scm.checkout_root)
def scm(self):
return self._scm
def scm_for_path(self, path):
# FIXME: consider supporting more than one SCM so that we can do more comprehensive testing.
self.initialize_scm()
return self._scm
def checkout(self):
return self._checkout
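# A hedged usage sketch, not part of the original file. It only exercises
# objects constructed above; the "test" port name is assumed to be the one
# registered by webkitpy.layout_tests.port.test.
def _example_mock_host_usage():  # illustrative only
    host = MockHost()
    port = host.port_factory.get("test")
    return host.scm(), port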
| was4444/chromium.src | third_party/WebKit/Tools/Scripts/webkitpy/common/host_mock.py | Python | bsd-3-clause | 3,518 |
import numpy as np
from ._layout import Layout
from ._multivector import MultiVector
class ConformalLayout(Layout):
r"""
A layout for a conformal algebra, which adds extra constants and helpers.
Typically these should be constructed via :func:`clifford.conformalize`.
.. versionadded:: 1.2.0
Attributes
----------
ep : MultiVector
The first added basis element, :math:`e_{+}`, usually with :math:`e_{+}^2 = +1`
en : MultiVector
        The second added basis element, :math:`e_{-}`, usually with :math:`e_{-}^2 = -1`
eo : MultiVector
The null basis vector at the origin, :math:`e_o = 0.5(e_{-} - e_{+})`
einf : MultiVector
The null vector at infinity, :math:`e_\infty = e_{-} + e_{+}`
E0 : MultiVector
The minkowski subspace bivector, :math:`e_\infty \wedge e_o`
I_base : MultiVector
        The pseudoscalar of the base GA, expressed in the CGA layout
"""
def __init__(self, *args, layout=None, **kwargs):
super().__init__(*args, **kwargs)
self._base_layout = layout
ep, en = self.basis_vectors_lst[-2:]
# setup null basis, and minkowski subspace bivector
eo = .5 ^ (en - ep)
einf = en + ep
E0 = einf ^ eo
I_base = self.pseudoScalar*E0
# helper properties
self.ep = ep
self.en = en
self.eo = eo
self.einf = einf
self.E0 = E0
self.I_base = I_base
@classmethod
def _from_base_layout(cls, layout, added_sig=[1, -1], **kwargs) -> 'ConformalLayout':
""" helper to implement :func:`clifford.conformalize` """
sig_c = list(layout.sig) + added_sig
return cls(
sig_c,
ids=layout._basis_vector_ids.augmented_with(len(added_sig)),
layout=layout, **kwargs)
# some convenience functions
def up(self, x: MultiVector) -> MultiVector:
""" up-project a vector from GA to CGA """
try:
if x.layout == self._base_layout:
# vector is in original space, map it into conformal space
old_val = x.value
new_val = np.zeros(self.gaDims)
new_val[:len(old_val)] = old_val
x = self.MultiVector(value=new_val)
        except AttributeError:
            # if x is a scalar it doesn't have a layout, but the
            # following will still work
pass
# then up-project into a null vector
return x + (.5 ^ ((x**2)*self.einf)) + self.eo
def homo(self, x: MultiVector) -> MultiVector:
""" homogenize a CGA vector """
return x/(-x | self.einf)[()]
def down(self, x: MultiVector) -> MultiVector:
""" down-project a vector from CGA to GA """
x_down = (self.homo(x) ^ self.E0)*self.E0
# new_val = x_down.value[:self.base_layout.gaDims]
# create vector in self.base_layout (not cga)
# x_down = self.base_layout.MultiVector(value=new_val)
return x_down
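# A hedged usage sketch, not part of the original module. It assumes the public
# helpers clifford.Cl and clifford.conformalize, which build a base layout and
# its conformalized counterpart.
def _example_up_down_roundtrip():  # illustrative only
    from clifford import Cl, conformalize

    layout, blades = Cl(3)                        # base 3D geometric algebra
    cga, cga_blades, stuff = conformalize(layout)  # returns a ConformalLayout
    e1 = blades['e1']
    X = cga.up(e1)       # up-project onto a null conformal point
    return cga.down(X)   # down-projects back to e1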
| arsenovic/clifford | clifford/_conformal_layout.py | Python | bsd-3-clause | 2,999 |
"""
Menu-driven login system
Contribution - Griatch 2011
This is an alternative login system for Evennia, using the
contrib.menusystem module. As opposed to the default system it doesn't
use emails for authentication and also don't auto-creates a Character
with the same name as the Player (instead assuming some sort of
character-creation to come next).
Install is simple:
To your settings file, add/edit the line:
CMDSET_UNLOGGEDIN = "contrib.menu_login.UnloggedInCmdSet"
That's it. Reload the server and try to log in to see it.
The initial login "graphic" is taken from strings in the module given
by settings.CONNECTION_SCREEN_MODULE. You will want to copy the
template file in game/gamesrc/conf/examples up one level and re-point
the settings file to this custom module. You can then edit the string
in that module (at least comment out the default string that mentions
commands that are not available) and add something more suitable for
the initial splash screen.
"""
import re
import traceback
from django.conf import settings
from ev import managers
from ev import utils, logger, create_player
from ev import Command, CmdSet
from ev import syscmdkeys
from src.server.models import ServerConfig
from contrib.menusystem import MenuNode, MenuTree
CMD_LOGINSTART = syscmdkeys.CMD_LOGINSTART
CMD_NOINPUT = syscmdkeys.CMD_NOINPUT
CMD_NOMATCH = syscmdkeys.CMD_NOMATCH
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
# Commands run on the unloggedin screen. Note that this is not using
# settings.UNLOGGEDIN_CMDSET but the menu system, which is why some are
# named for the numbers in the menu.
#
# Also note that the menu system will automatically assign all
# commands used in its structure a property "menutree" holding a reference
# back to the menutree. This allows the commands to do direct manipulation
# for example by triggering a conditional jump to another node.
#
# Menu entry 1a - Entering a Username
class CmdBackToStart(Command):
"""
Step back to node0
"""
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Execute the command"
self.menutree.goto("START")
class CmdUsernameSelect(Command):
"""
Handles the entering of a username and
checks if it exists.
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
player = managers.players.get_player_from_name(self.args)
if not player:
self.caller.msg("{rThis account name couldn't be found. Did you create it? If you did, make sure you spelled it right (case doesn't matter).{n")
self.menutree.goto("node1a")
else:
# store the player so next step can find it
self.menutree.player = player
self.caller.msg(echo=False)
self.menutree.goto("node1b")
# Menu entry 1b - Entering a Password
class CmdPasswordSelectBack(Command):
"""
Steps back from the Password selection
"""
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Execute the command"
self.menutree.goto("node1a")
self.caller.msg(echo=True)
class CmdPasswordSelect(Command):
"""
Handles the entering of a password and logs into the game.
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
self.caller.msg(echo=True)
if not hasattr(self.menutree, "player"):
self.caller.msg("{rSomething went wrong! The player was not remembered from last step!{n")
self.menutree.goto("node1a")
return
player = self.menutree.player
if not player.check_password(self.args):
self.caller.msg("{rIncorrect password.{n")
self.menutree.goto("node1b")
return
# before going on, check eventual bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name.lower() for tup in bans)
or
any(tup[2].match(self.caller.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
self.caller.msg(string)
self.caller.sessionhandler.disconnect(self.caller, "Good bye! Disconnecting...")
return
# we are ok, log us in.
self.caller.msg("{gWelcome %s! Logging in ...{n" % player.key)
#self.caller.session_login(player)
self.caller.sessionhandler.login(self.caller, player)
# abort menu, do cleanup.
self.menutree.goto("END")
# we are logged in. Look around.
character = player.character
if character:
character.execute_cmd("look")
else:
# we have no character yet; use player's look, if it exists
player.execute_cmd("look")
# Menu entry 2a - Creating a Username
class CmdUsernameCreate(Command):
"""
Handle the creation of a valid username
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
playername = self.args
# sanity check on the name
if not re.findall('^[\w. @+-]+$', playername) or not (3 <= len(playername) <= 30):
            self.caller.msg("\n\r {rAccount name should be between 3 and 30 characters. Letters, spaces, digits and @/./+/-/_ only.{n")  # this echoes the restrictions made by django's auth module.
self.menutree.goto("node2a")
return
if managers.players.get_player_from_name(playername):
self.caller.msg("\n\r {rAccount name %s already exists.{n" % playername)
self.menutree.goto("node2a")
return
# store the name for the next step
self.menutree.playername = playername
self.caller.msg(echo=False)
self.menutree.goto("node2b")
# Menu entry 2b - Creating a Password
class CmdPasswordCreateBack(Command):
"Step back from the password creation"
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Execute the command"
self.caller.msg(echo=True)
self.menutree.goto("node2a")
class CmdPasswordCreate(Command):
"Handle the creation of a password. This also creates the actual Player/User object."
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
password = self.args
self.caller.msg(echo=False)
if not hasattr(self.menutree, 'playername'):
self.caller.msg("{rSomething went wrong! Playername not remembered from previous step!{n")
self.menutree.goto("node2a")
return
playername = self.menutree.playername
if len(password) < 3:
# too short password
string = "{rYour password must be at least 3 characters or longer."
string += "\n\rFor best security, make it at least 8 characters "
string += "long, avoid making it a real word and mix numbers "
string += "into it.{n"
self.caller.msg(string)
self.menutree.goto("node2b")
return
# everything's ok. Create the new player account. Don't create
# a Character here.
try:
permissions = settings.PERMISSION_PLAYER_DEFAULT
typeclass = settings.BASE_PLAYER_TYPECLASS
new_player = create_player(playername, None, password,
typeclass=typeclass,
permissions=permissions)
if not new_player:
self.msg("There was an error creating the Player. This error was logged. Contact an admin.")
self.menutree.goto("START")
return
utils.init_new_player(new_player)
# join the new player to the public channel
pchanneldef = settings.CHANNEL_PUBLIC
if pchanneldef:
pchannel = managers.channels.get_channel(pchanneldef[0])
if not pchannel.connect(new_player):
string = "New player '%s' could not connect to public channel!" % new_player.key
logger.log_errmsg(string)
# tell the caller everything went well.
string = "{gA new account '%s' was created. Now go log in from the menu!{n"
self.caller.msg(string % (playername))
self.menutree.goto("START")
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't, we
# won't see any errors at all.
string = "%s\nThis is a bug. Please e-mail an admin if the problem persists."
self.caller.msg(string % (traceback.format_exc()))
logger.log_errmsg(traceback.format_exc())
# Menu entry 3 - help screen
LOGIN_SCREEN_HELP = \
"""
Welcome to %s!
To login you need to first create an account. This is easy and
free to do: Choose option {w(1){n in the menu and enter an account
name and password when prompted. Note: the account name is {wnot{n
the name of the Character you will play in the game!
It's always a good idea (not only here, but everywhere on the net)
to not use a regular word for your password. Make it longer than 3
characters (ideally 6 or more) and mix numbers and capitalization
into it. The password also handles whitespace, so why not make it
a small sentence - easy to remember, hard for a computer to crack.
Once you have an account, use option {w(2){n to log in using the
account name and password you specified.
Use the {whelp{n command once you're logged in to get more
aid. Hope you enjoy your stay!
(return to go back)""" % settings.SERVERNAME
# Menu entry 4
class CmdUnloggedinQuit(Command):
"""
We maintain a different version of the quit command
here for unconnected players for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "4"
aliases = ["quit", "qu", "q"]
locks = "cmd:all()"
def func(self):
"Simply close the connection."
self.menutree.goto("END")
self.caller.sessionhandler.disconnect(self.caller, "Good bye! Disconnecting...")
# The login menu tree, using the commands above
START = MenuNode("START", text=utils.random_string_from_module(CONNECTION_SCREEN_MODULE),
links=["node1a", "node2a", "node3", "END"],
linktexts=["Log in with an existing account",
"Create a new account",
"Help",
"Quit"],
selectcmds=[None, None, None, CmdUnloggedinQuit])
node1a = MenuNode("node1a", text="Please enter your account name (empty to abort).",
links=["START", "node1b"],
helptext=["Enter the account name you previously registered with."],
keywords=[CMD_NOINPUT, CMD_NOMATCH],
selectcmds=[CmdBackToStart, CmdUsernameSelect],
nodefaultcmds=True) # if we don't, default help/look will be triggered by names starting with l/h ...
node1b = MenuNode("node1b", text="Please enter your password (empty to go back).",
links=["node1a", "END"],
keywords=[CMD_NOINPUT, CMD_NOMATCH],
selectcmds=[CmdPasswordSelectBack, CmdPasswordSelect],
nodefaultcmds=True)
node2a = MenuNode("node2a", text="Please enter your desired account name (empty to abort).",
links=["START", "node2b"],
                  helptext="Account name must be between 3 and 30 characters. Letters, spaces, digits and @/./+/-/_ only.",
keywords=[CMD_NOINPUT, CMD_NOMATCH],
selectcmds=[CmdBackToStart, CmdUsernameCreate],
nodefaultcmds=True)
node2b = MenuNode("node2b", text="Please enter your password (empty to go back).",
links=["node2a", "START"],
                  helptext="Your password can contain any characters.",
keywords=[CMD_NOINPUT, CMD_NOMATCH],
selectcmds=[CmdPasswordCreateBack, CmdPasswordCreate],
nodefaultcmds=True)
node3 = MenuNode("node3", text=LOGIN_SCREEN_HELP,
links=["START"],
helptext="",
keywords=[CMD_NOINPUT],
selectcmds=[CmdBackToStart])
# access commands
class UnloggedInCmdSet(CmdSet):
"Cmdset for the unloggedin state"
key = "UnloggedinState"
priority = 0
def at_cmdset_creation(self):
"Called when cmdset is first created"
self.add(CmdUnloggedinLook())
class CmdUnloggedinLook(Command):
"""
An unloggedin version of the look command. This is called by the server
when the player first connects. It sets up the menu before handing off
    to the menu's own look command.
"""
key = CMD_LOGINSTART
locks = "cmd:all()"
def func(self):
"Execute the menu"
menu = MenuTree(self.caller, nodes=(START, node1a, node1b,
node2a, node2b, node3),
exec_end=None)
menu.start()
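# Summary of the node graph defined above, for quick reference (added for
# readability, not part of the original contrib):
#   START -1-> node1a (enter account name) -> node1b (enter password) -> log in
#   START -2-> node2a (pick account name)  -> node2b (pick password)  -> account created
#   START -3-> node3  (help screen)        -> back to START
#   START -4-> quit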
| GhostshipSoftware/avaloria | contrib/menu_login.py | Python | bsd-3-clause | 13,491 |
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Android's lint tool."""
import argparse
import os
import re
import sys
import traceback
from xml.dom import minidom
from util import build_utils
_SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..'))
def _OnStaleMd5(changes, lint_path, config_path, processed_config_path,
manifest_path, result_path, product_dir, sources, jar_path,
cache_dir, android_sdk_version, resource_dir=None,
classpath=None, can_fail_build=False, silent=False):
def _RelativizePath(path):
"""Returns relative path to top-level src dir.
Args:
path: A path relative to cwd.
"""
return os.path.relpath(os.path.abspath(path), _SRC_ROOT)
def _ProcessConfigFile():
if not config_path or not processed_config_path:
return
if not build_utils.IsTimeStale(processed_config_path, [config_path]):
return
with open(config_path, 'rb') as f:
content = f.read().replace(
'PRODUCT_DIR', _RelativizePath(product_dir))
with open(processed_config_path, 'wb') as f:
f.write(content)
def _ProcessResultFile():
with open(result_path, 'rb') as f:
content = f.read().replace(
_RelativizePath(product_dir), 'PRODUCT_DIR')
with open(result_path, 'wb') as f:
f.write(content)
def _ParseAndShowResultFile():
dom = minidom.parse(result_path)
issues = dom.getElementsByTagName('issue')
if not silent:
print >> sys.stderr
for issue in issues:
issue_id = issue.attributes['id'].value
message = issue.attributes['message'].value
location_elem = issue.getElementsByTagName('location')[0]
path = location_elem.attributes['file'].value
line = location_elem.getAttribute('line')
if line:
error = '%s:%s %s: %s [warning]' % (path, line, message, issue_id)
else:
# Issues in class files don't have a line number.
error = '%s %s: %s [warning]' % (path, message, issue_id)
print >> sys.stderr, error.encode('utf-8')
for attr in ['errorLine1', 'errorLine2']:
error_line = issue.getAttribute(attr)
if error_line:
print >> sys.stderr, error_line.encode('utf-8')
return len(issues)
# Need to include all sources when a resource_dir is set so that resources are
# not marked as unused.
# TODO(agrieve): Figure out how IDEs do incremental linting.
if not resource_dir and changes.AddedOrModifiedOnly():
changed_paths = set(changes.IterChangedPaths())
sources = [s for s in sources if s in changed_paths]
with build_utils.TempDir() as temp_dir:
_ProcessConfigFile()
cmd = [
_RelativizePath(lint_path), '-Werror', '--exitcode', '--showall',
'--xml', _RelativizePath(result_path),
]
if jar_path:
# --classpath is just for .class files for this one target.
cmd.extend(['--classpath', _RelativizePath(jar_path)])
if processed_config_path:
cmd.extend(['--config', _RelativizePath(processed_config_path)])
if resource_dir:
cmd.extend(['--resources', _RelativizePath(resource_dir)])
if classpath:
# --libraries is the classpath (excluding active target).
cp = ':'.join(_RelativizePath(p) for p in classpath)
cmd.extend(['--libraries', cp])
# There may be multiple source files with the same basename (but in
# different directories). It is difficult to determine what part of the path
# corresponds to the java package, and so instead just link the source files
# into temporary directories (creating a new one whenever there is a name
# conflict).
src_dirs = []
def NewSourceDir():
new_dir = os.path.join(temp_dir, str(len(src_dirs)))
os.mkdir(new_dir)
src_dirs.append(new_dir)
return new_dir
def PathInDir(d, src):
return os.path.join(d, os.path.basename(src))
for src in sources:
src_dir = None
for d in src_dirs:
if not os.path.exists(PathInDir(d, src)):
src_dir = d
break
if not src_dir:
src_dir = NewSourceDir()
cmd.extend(['--sources', _RelativizePath(src_dir)])
os.symlink(os.path.abspath(src), PathInDir(src_dir, src))
project_dir = NewSourceDir()
if android_sdk_version:
      # Create a dummy project.properties file in a temporary "project"
      # directory. It is the only way to add the Android SDK to Lint's
      # classpath. A proper classpath is necessary for most source-level checks.
with open(os.path.join(project_dir, 'project.properties'), 'w') \
as propfile:
print >> propfile, 'target=android-{}'.format(android_sdk_version)
# Put the manifest in a temporary directory in order to avoid lint detecting
    # sibling res/ and src/ directories (which should be passed explicitly if they
# are to be included).
if manifest_path:
os.symlink(os.path.abspath(manifest_path),
PathInDir(project_dir, manifest_path))
cmd.append(project_dir)
if os.path.exists(result_path):
os.remove(result_path)
env = {}
stderr_filter = None
if cache_dir:
env['_JAVA_OPTIONS'] = '-Duser.home=%s' % _RelativizePath(cache_dir)
# When _JAVA_OPTIONS is set, java prints to stderr:
# Picked up _JAVA_OPTIONS: ...
#
# We drop all lines that contain _JAVA_OPTIONS from the output
stderr_filter = lambda l: re.sub(r'.*_JAVA_OPTIONS.*\n?', '', l)
try:
build_utils.CheckOutput(cmd, cwd=_SRC_ROOT, env=env or None,
stderr_filter=stderr_filter)
except build_utils.CalledProcessError:
# There is a problem with lint usage
if not os.path.exists(result_path):
raise
# Sometimes produces empty (almost) files:
if os.path.getsize(result_path) < 10:
if can_fail_build:
raise
elif not silent:
traceback.print_exc()
return
    # There are actual lint issues
    try:
      num_issues = _ParseAndShowResultFile()
    except Exception: # pylint: disable=broad-except
      if not silent:
        print 'Lint created unparseable xml file...'
        print 'File contents:'
        with open(result_path) as f:
          print f.read()
      if not can_fail_build:
        return
      if not silent:
        traceback.print_exc()
      raise
_ProcessResultFile()
msg = ('\nLint found %d new issues.\n'
' - For full explanation refer to %s\n' %
(num_issues,
_RelativizePath(result_path)))
if config_path:
msg += (' - Wanna suppress these issues?\n'
' 1. Read comment in %s\n'
' 2. Run "python %s %s"\n' %
(_RelativizePath(config_path),
_RelativizePath(os.path.join(_SRC_ROOT, 'build', 'android',
'lint', 'suppress.py')),
_RelativizePath(result_path)))
if not silent:
print >> sys.stderr, msg
if can_fail_build:
raise Exception('Lint failed.')
def main():
parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(parser)
parser.add_argument('--lint-path', required=True,
help='Path to lint executable.')
parser.add_argument('--product-dir', required=True,
help='Path to product dir.')
parser.add_argument('--result-path', required=True,
help='Path to XML lint result file.')
parser.add_argument('--cache-dir', required=True,
help='Path to the directory in which the android cache '
'directory tree should be stored.')
parser.add_argument('--platform-xml-path', required=True,
help='Path to api-platforms.xml')
parser.add_argument('--android-sdk-version',
help='Version (API level) of the Android SDK used for '
'building.')
parser.add_argument('--create-cache', action='store_true',
help='Mark the lint cache file as an output rather than '
'an input.')
parser.add_argument('--can-fail-build', action='store_true',
help='If set, script will exit with nonzero exit status'
' if lint errors are present')
parser.add_argument('--config-path',
help='Path to lint suppressions file.')
parser.add_argument('--enable', action='store_true',
help='Run lint instead of just touching stamp.')
parser.add_argument('--jar-path',
help='Jar file containing class files.')
parser.add_argument('--java-files',
help='Paths to java files.')
parser.add_argument('--manifest-path',
help='Path to AndroidManifest.xml')
parser.add_argument('--classpath', default=[], action='append',
help='GYP-list of classpath .jar files')
parser.add_argument('--processed-config-path',
help='Path to processed lint suppressions file.')
parser.add_argument('--resource-dir',
help='Path to resource dir.')
parser.add_argument('--silent', action='store_true',
help='If set, script will not log anything.')
parser.add_argument('--src-dirs',
help='Directories containing java files.')
parser.add_argument('--stamp',
help='Path to touch on success.')
args = parser.parse_args(build_utils.ExpandFileArgs(sys.argv[1:]))
if args.enable:
sources = []
if args.src_dirs:
src_dirs = build_utils.ParseGypList(args.src_dirs)
sources = build_utils.FindInDirectories(src_dirs, '*.java')
elif args.java_files:
sources = build_utils.ParseGypList(args.java_files)
if args.config_path and not args.processed_config_path:
parser.error('--config-path specified without --processed-config-path')
elif args.processed_config_path and not args.config_path:
parser.error('--processed-config-path specified without --config-path')
input_paths = [
args.lint_path,
args.platform_xml_path,
]
if args.config_path:
input_paths.append(args.config_path)
if args.jar_path:
input_paths.append(args.jar_path)
if args.manifest_path:
input_paths.append(args.manifest_path)
if args.resource_dir:
input_paths.extend(build_utils.FindInDirectory(args.resource_dir, '*'))
if sources:
input_paths.extend(sources)
classpath = []
for gyp_list in args.classpath:
classpath.extend(build_utils.ParseGypList(gyp_list))
input_paths.extend(classpath)
input_strings = []
if args.android_sdk_version:
input_strings.append(args.android_sdk_version)
if args.processed_config_path:
input_strings.append(args.processed_config_path)
output_paths = [ args.result_path ]
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, args.lint_path,
args.config_path,
args.processed_config_path,
args.manifest_path, args.result_path,
args.product_dir, sources,
args.jar_path,
args.cache_dir,
args.android_sdk_version,
resource_dir=args.resource_dir,
classpath=classpath,
can_fail_build=args.can_fail_build,
silent=args.silent),
args,
input_paths=input_paths,
input_strings=input_strings,
output_paths=output_paths,
pass_changes=True,
depfile_deps=classpath)
if __name__ == '__main__':
sys.exit(main())
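# Illustrative invocation, not part of the original script. All flags are
# defined in main() above; the paths and SDK version are made-up examples.
#   python build/android/gyp/lint.py \
#       --lint-path=third_party/android_tools/sdk/tools/lint \
#       --product-dir=out/Debug \
#       --result-path=out/Debug/lint_result.xml \
#       --cache-dir=out/Debug/lint_cache \
#       --platform-xml-path=third_party/android_tools/sdk/api-versions.xml \
#       --android-sdk-version=23 \
#       --enable \
#       --jar-path=out/Debug/lib.java/foo.jar \
#       --manifest-path=foo/AndroidManifest.xml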
| was4444/chromium.src | build/android/gyp/lint.py | Python | bsd-3-clause | 12,623 |
"""
Concurrent downloaders
"""
import os
import sys
import signal
import logging
import itertools
from functools import partial
from concurrent.futures import ProcessPoolExecutor
from pomp.core.base import (
BaseCrawler, BaseDownloader, BaseCrawlException,
)
from pomp.contrib.urllibtools import UrllibDownloadWorker
from pomp.core.utils import iterator, Planned
log = logging.getLogger('pomp.contrib.concurrent')
def _run_download_worker(params, request):
pid = os.getpid()
log.debug("Download worker pid=%s params=%s", pid, params)
try:
# Initialize worker and call get_one method
return params['worker_class'](
**params.get('worker_kwargs', {})
).process(request)
except Exception:
log.exception(
"Exception on download worker pid=%s request=%s", pid, request
)
raise
def _run_crawler_worker(params, response):
pid = os.getpid()
log.debug("Crawler worker pid=%s params=%s", pid, params)
try:
# Initialize crawler worker
worker = params['worker_class'](**params.get('worker_kwargs', {}))
# process response
items = worker.extract_items(response)
next_requests = worker.next_requests(response)
if next_requests:
return list(
itertools.chain(
iterator(items),
iterator(next_requests),
)
)
return list(iterator(items))
except Exception:
log.exception(
"Exception on crawler worker pid=%s request=%s", pid, response
)
raise
class ConcurrentMixin(object):
def _done(self, request, done_future, future):
try:
response = future.result()
except Exception as e:
log.exception('Exception on %s', request)
done_future.set_result(BaseCrawlException(
request,
exception=e,
exc_info=sys.exc_info(),
))
else:
done_future.set_result(response)
class ConcurrentDownloader(BaseDownloader, ConcurrentMixin):
"""Concurrent ProcessPoolExecutor downloader
    :param worker_class: download worker class executed for each request in the pool
    :param worker_kwargs: keyword arguments passed to each worker
    :param pool_size: size of the ProcessPoolExecutor pool
"""
def __init__(
self, worker_class,
worker_kwargs=None, pool_size=5,):
# configure executor
self.pool_size = pool_size
self.executor = ProcessPoolExecutor(max_workers=self.pool_size)
# prepare worker params
self.worker_params = {
'worker_class': worker_class,
'worker_kwargs': worker_kwargs or {},
}
# ctrl-c support for python2.x
# trap sigint
signal.signal(signal.SIGINT, lambda s, f: s)
super(ConcurrentDownloader, self).__init__()
def process(self, crawler, request):
# delegate request processing to the executor
future = self.executor.submit(
_run_download_worker, self.worker_params, request,
)
# build Planned object
done_future = Planned()
# when executor finish request - fire done_future
future.add_done_callback(
partial(self._done, request, done_future)
)
return done_future
def get_workers_count(self):
return self.pool_size
def stop(self, crawler):
self.executor.shutdown()
class ConcurrentUrllibDownloader(ConcurrentDownloader):
"""Concurrent ProcessPoolExecutor downloader for fetching data with urllib
:class:`pomp.contrib.SimpleDownloader`
:param pool_size: pool size of ProcessPoolExecutor
:param timeout: request timeout in seconds
"""
def __init__(self, pool_size=5, timeout=None):
super(ConcurrentUrllibDownloader, self).__init__(
pool_size=pool_size,
worker_class=UrllibDownloadWorker,
worker_kwargs={
'timeout': timeout
},
)
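# Usage sketch (illustrative, not part of the original module). The constructor
# call follows the signature above; how a pomp engine consumes the returned
# Planned object is an assumption here.
#
#   downloader = ConcurrentUrllibDownloader(pool_size=2, timeout=10)
#   planned = downloader.process(crawler, request)
#   # ``planned`` resolves to the worker's response, or to a BaseCrawlException
#   # if the subprocess raised (see ConcurrentMixin._done above).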
class ConcurrentCrawler(BaseCrawler, ConcurrentMixin):
"""Concurrent ProcessPoolExecutor crawler
    :param worker_class: crawler worker class executed for each response in the pool
    :param worker_kwargs: keyword arguments passed to each worker
    :param pool_size: pool size of ProcessPoolExecutor
"""
def __init__(self, worker_class, worker_kwargs=None, pool_size=5):
# configure executor
self.pool_size = pool_size
self.executor = ProcessPoolExecutor(max_workers=self.pool_size)
# prepare worker params
self.worker_params = {
'worker_class': worker_class,
'worker_kwargs': worker_kwargs or {},
}
# inherit ENTRY_REQUESTS from worker_class
self.ENTRY_REQUESTS = getattr(worker_class, 'ENTRY_REQUESTS', None)
def process(self, response):
# delegate response processing to the executor
future = self.executor.submit(
_run_crawler_worker, self.worker_params, response,
)
# build Planned object
done_future = Planned()
# when executor finish response processing - fire done_future
future.add_done_callback(
partial(self._done, response, done_future)
)
return done_future
| estin/pomp | pomp/contrib/concurrenttools.py | Python | bsd-3-clause | 5,193 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import time
import traceback
import urlparse
import random
import csv
from chrome_remote_control import page_test
from chrome_remote_control import util
from chrome_remote_control import wpr_modes
class PageState(object):
def __init__(self):
self.did_login = False
class PageRunner(object):
"""Runs a given test against a given test."""
def __init__(self, page_set):
self.page_set = page_set
def __enter__(self):
return self
def __exit__(self, *args):
self.Close()
def _ReorderPageSet(self, test_shuffle_order_file):
page_set_dict = {}
for page in self.page_set:
page_set_dict[page.url] = page
self.page_set.pages = []
with open(test_shuffle_order_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
csv_header = csv_reader.next()
if 'url' not in csv_header:
raise Exception('Unusable test_shuffle_order_file.')
url_index = csv_header.index('url')
for csv_row in csv_reader:
if csv_row[url_index] in page_set_dict:
self.page_set.pages.append(page_set_dict[csv_row[url_index]])
else:
raise Exception('Unusable test_shuffle_order_file.')
def Run(self, options, possible_browser, test, results):
archive_path = os.path.abspath(os.path.join(self.page_set.base_dir,
self.page_set.archive_path))
if options.wpr_mode == wpr_modes.WPR_OFF:
if os.path.isfile(archive_path):
possible_browser.options.wpr_mode = wpr_modes.WPR_REPLAY
else:
possible_browser.options.wpr_mode = wpr_modes.WPR_OFF
logging.warning("""
The page set archive %s does not exist, benchmarking against live sites!
Results won't be repeatable or comparable.
To fix this, either add svn-internal to your .gclient using
http://goto/read-src-internal, or create a new archive using --record.
""", os.path.relpath(archive_path))
credentials_path = None
if self.page_set.credentials_path:
credentials_path = os.path.join(self.page_set.base_dir,
self.page_set.credentials_path)
if not os.path.exists(credentials_path):
credentials_path = None
with possible_browser.Create() as b:
b.credentials.credentials_path = credentials_path
test.SetUpBrowser(b)
b.credentials.WarnIfMissingCredentials(self.page_set)
if not options.test_shuffle and options.test_shuffle_order_file is not\
None:
raise Exception('--test-shuffle-order-file requires --test-shuffle.')
# Set up a random generator for shuffling the page running order.
test_random = random.Random()
b.SetReplayArchivePath(archive_path)
with b.ConnectToNthTab(0) as tab:
if options.test_shuffle_order_file is None:
for _ in range(int(options.pageset_repeat)):
if options.test_shuffle:
test_random.shuffle(self.page_set)
for page in self.page_set:
for _ in range(int(options.page_repeat)):
self._RunPage(options, page, tab, test, results)
else:
self._ReorderPageSet(options.test_shuffle_order_file)
for page in self.page_set:
self._RunPage(options, page, tab, test, results)
def _RunPage(self, options, page, tab, test, results):
logging.info('Running %s' % page.url)
page_state = PageState()
try:
did_prepare = self.PreparePage(page, tab, page_state, results)
except Exception, ex:
logging.error('Unexpected failure while running %s: %s',
page.url, traceback.format_exc())
self.CleanUpPage(page, tab, page_state)
raise
if not did_prepare:
self.CleanUpPage(page, tab, page_state)
return
try:
test.Run(options, page, tab, results)
except page_test.Failure, ex:
logging.info('%s: %s', ex, page.url)
results.AddFailure(page, ex, traceback.format_exc())
return
except util.TimeoutException, ex:
logging.warning('Timed out while running %s', page.url)
results.AddFailure(page, ex, traceback.format_exc())
return
except Exception, ex:
logging.error('Unexpected failure while running %s: %s',
page.url, traceback.format_exc())
raise
finally:
self.CleanUpPage(page, tab, page_state)
def Close(self):
pass
@staticmethod
def WaitForPageToLoad(expression, tab):
def IsPageLoaded():
return tab.runtime.Evaluate(expression)
# Wait until the form is submitted and the page completes loading.
util.WaitFor(lambda: IsPageLoaded(), 60) # pylint: disable=W0108
def PreparePage(self, page, tab, page_state, results):
parsed_url = urlparse.urlparse(page.url)
if parsed_url[0] == 'file':
path = os.path.join(self.page_set.base_dir,
parsed_url.netloc) # pylint: disable=E1101
dirname, filename = os.path.split(path)
tab.browser.SetHTTPServerDirectory(dirname)
target_side_url = tab.browser.http_server.UrlOf(filename)
else:
target_side_url = page.url
if page.credentials:
page_state.did_login = tab.browser.credentials.LoginNeeded(
tab, page.credentials)
if not page_state.did_login:
msg = 'Could not login to %s on %s' % (page.credentials,
target_side_url)
logging.info(msg)
results.AddFailure(page, msg, "")
return False
tab.page.Navigate(target_side_url)
# Wait for unpredictable redirects.
if page.wait_time_after_navigate:
time.sleep(page.wait_time_after_navigate)
if page.wait_for_javascript_expression is not None:
self.WaitForPageToLoad(page.wait_for_javascript_expression, tab)
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
return True
def CleanUpPage(self, page, tab, page_state): # pylint: disable=R0201
if page.credentials and page_state.did_login:
tab.browser.credentials.LoginNoLongerNeeded(tab, page.credentials)
tab.runtime.Evaluate("""chrome && chrome.benchmarking &&
chrome.benchmarking.closeConnections()""")
| junmin-zhu/chromium-rivertrail | tools/chrome_remote_control/chrome_remote_control/page_runner.py | Python | bsd-3-clause | 6,385 |
"""Migrate review conditions from settings
Revision ID: 933665578547
Revises: 02bf20df06b3
Create Date: 2020-04-02 11:13:58.931020
"""
import json
from collections import defaultdict
from uuid import uuid4
from alembic import context, op
from indico.modules.events.editing.models.editable import EditableType
# revision identifiers, used by Alembic.
revision = '933665578547'
down_revision = '02bf20df06b3'
branch_labels = None
depends_on = None
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
for type_ in EditableType:
res = conn.execute(
"SELECT event_id, value FROM events.settings WHERE module = 'editing' AND name = %s",
(f'{type_.name}_review_conditions',),
)
for event_id, value in res:
for condition in value:
res2 = conn.execute(
'INSERT INTO event_editing.review_conditions (type, event_id) VALUES (%s, %s) RETURNING id',
(type_, event_id),
)
revcon_id = res2.fetchone()[0]
for file_type in condition[1]:
conn.execute('''
INSERT INTO event_editing.review_condition_file_types (file_type_id, review_condition_id)
VALUES (%s, %s)
''', (file_type, revcon_id),
)
conn.execute(
"DELETE FROM events.settings WHERE module = 'editing' AND name = %s",
(f'{type_.name}_review_conditions',),
)
def downgrade():
if context.is_offline_mode():
        raise Exception('This downgrade is only possible in online mode')
conn = op.get_bind()
for type_ in EditableType:
res = conn.execute('SELECT id, event_id FROM event_editing.review_conditions WHERE type = %s', (type_.value,))
review_conditions = defaultdict(list)
for id, event_id in res:
file_types = conn.execute(
'SELECT file_type_id FROM event_editing.review_condition_file_types WHERE review_condition_id = %s',
(id,),
)
value = [str(uuid4()), [f[0] for f in file_types.fetchall()]]
review_conditions[event_id].append(value)
for key, value in review_conditions.items():
conn.execute(
"INSERT INTO events.settings (event_id, module, name, value) VALUES (%s, 'editing', %s, %s)",
(key, f'{type_.name}_review_conditions', json.dumps(value)),
)
conn.execute('DELETE FROM event_editing.review_condition_file_types')
conn.execute('DELETE FROM event_editing.review_conditions')
| ThiefMaster/indico | indico/migrations/versions/20200402_1113_933665578547_migrate_review_conditions_from_settings.py | Python | mit | 2,739 |
"""
PynamoDB exceptions
"""
from typing import Any, Optional
import botocore.exceptions
class PynamoDBException(Exception):
"""
A common exception class
"""
def __init__(self, msg: Optional[str] = None, cause: Optional[Exception] = None) -> None:
self.msg = msg
self.cause = cause
super(PynamoDBException, self).__init__(self.msg)
@property
def cause_response_code(self) -> Optional[str]:
return getattr(self.cause, 'response', {}).get('Error', {}).get('Code')
@property
def cause_response_message(self) -> Optional[str]:
return getattr(self.cause, 'response', {}).get('Error', {}).get('Message')
class PynamoDBConnectionError(PynamoDBException):
"""
A base class for connection errors
"""
msg = "Connection Error"
class DeleteError(PynamoDBConnectionError):
"""
Raised when an error occurs deleting an item
"""
msg = "Error deleting item"
class QueryError(PynamoDBConnectionError):
"""
Raised when queries fail
"""
msg = "Error performing query"
class ScanError(PynamoDBConnectionError):
"""
Raised when a scan operation fails
"""
msg = "Error performing scan"
class PutError(PynamoDBConnectionError):
"""
Raised when an item fails to be created
"""
msg = "Error putting item"
class UpdateError(PynamoDBConnectionError):
"""
Raised when an item fails to be updated
"""
msg = "Error updating item"
class GetError(PynamoDBConnectionError):
"""
Raised when an item fails to be retrieved
"""
msg = "Error getting item"
class TableError(PynamoDBConnectionError):
"""
An error involving a dynamodb table operation
"""
msg = "Error performing a table operation"
class DoesNotExist(PynamoDBException):
"""
Raised when an item queried does not exist
"""
msg = "Item does not exist"
class TableDoesNotExist(PynamoDBException):
"""
Raised when an operation is attempted on a table that doesn't exist
"""
def __init__(self, table_name: str) -> None:
msg = "Table does not exist: `{}`".format(table_name)
super(TableDoesNotExist, self).__init__(msg)
class TransactWriteError(PynamoDBException):
"""
Raised when a TransactWrite operation fails
"""
pass
class TransactGetError(PynamoDBException):
"""
Raised when a TransactGet operation fails
"""
pass
class InvalidStateError(PynamoDBException):
"""
    Raised when the internal state of an operation context is invalid
"""
msg = "Operation in invalid state"
class AttributeDeserializationError(TypeError):
"""
Raised when attribute type is invalid
"""
def __init__(self, attr_name: str, attr_type: str):
msg = "Cannot deserialize '{}' attribute from type: {}".format(attr_name, attr_type)
super(AttributeDeserializationError, self).__init__(msg)
class AttributeNullError(ValueError):
def __init__(self, attr_name: str) -> None:
self.attr_path = attr_name
def __str__(self):
return f"Attribute '{self.attr_path}' cannot be None"
def prepend_path(self, attr_name: str) -> None:
self.attr_path = attr_name + '.' + self.attr_path
class VerboseClientError(botocore.exceptions.ClientError):
def __init__(self, error_response: Any, operation_name: str, verbose_properties: Optional[Any] = None):
""" Modify the message template to include the desired verbose properties """
if not verbose_properties:
verbose_properties = {}
self.MSG_TEMPLATE = (
'An error occurred ({{error_code}}) on request ({request_id}) '
'on table ({table_name}) when calling the {{operation_name}} '
'operation: {{error_message}}'
).format(request_id=verbose_properties.get('request_id'), table_name=verbose_properties.get('table_name'))
super(VerboseClientError, self).__init__(error_response, operation_name)
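# Illustrative sketch (not part of the module): ``cause_response_code`` and
# ``cause_response_message`` just read botocore's ``response['Error']`` payload
# off the wrapped exception, so any object of that shape works for a quick
# check. The error code and message below are hypothetical.
#
#   class _FakeBotoError(Exception):
#       response = {'Error': {'Code': 'ConditionalCheckFailedException',
#                             'Message': 'The conditional request failed'}}
#
#   err = PutError(cause=_FakeBotoError())
#   assert err.cause_response_code == 'ConditionalCheckFailedException'
#   assert err.cause_response_message == 'The conditional request failed'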
| pynamodb/PynamoDB | pynamodb/exceptions.py | Python | mit | 4,009 |
# -*- coding: utf-8 -*-
"""
handler base
~~~~~~~~~~~~
Presents a reasonable base class for a ``Handler`` object, which handles
responding to an arbitrary "request" for action. For example, ``Handler``
is useful for responding to HTTP requests *or* noncyclical realtime-style
requests, and acts as a base class for ``Page`` and ``ServiceHandler``.
:author: Sam Gammon <[email protected]>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
# stdlib
import itertools
# canteen core, util, logic
from ..core import injection
# noinspection PyUnresolvedReferences
class Handler(object):
""" Base class structure for a ``Handler`` of some request or desired action.
Specifies basic machinery for tracking a ``request`` alongside some form
of ``response``.
Also keeps track of relevant ``environ`` (potentially from WSGI) and sets
up a jump off point for DI-provided tools like logging, config, caching,
template rendering, etc. """
# @TODO(sgammon): HTTPify, convert to decorator
config = property(lambda self: {})
__agent__ = None # current `agent` details
__status__ = 200 # it's a glass-half-full kind of day, why not
__routes__ = None # route map adapter from werkzeug
__context__ = None # holds current runtime context, if any
__logging__ = None # internal logging slot
__runtime__ = None # reference up to the runtime
__environ__ = None # original WSGI environment
__request__ = None # lazy-loaded request object
__headers__ = None # buffer HTTP header access
__response__ = None # lazy-loaded response object
__callback__ = None # callback to send data (sync or async)
__content_type__ = None # response content type
# set owner and injection side
__owner__, __metaclass__ = "Handler", injection.Compound
def __init__(self, environ=None,
start_response=None,
runtime=None,
request=None,
response=None, **context):
""" Initialize a new ``Handler`` object with proper ``environ`` details and
inform it of larger world around it.
``Handler`` objects (much like ``Runtime`` objects) are designed to be
usable independently as a WSGI-style callable. Note that the first two
position parameters of this ``__init__`` are the venerable ``environ``
and ``start_response`` - dispatching this way is totally possible, but
providing ``runtime``, ``request`` and ``response`` allow tighter
integration with the underlying runtime.
Current execution details (internal to Canteen) are passed as ``kwargs``
and compounded as new context items are added.
:param environ: WSGI environment, provided by active runtime. ``dict``
in standard WSGI format.
:param start_response: Callable to begin the response cycle. Usually a
vanilla ``function``.
:param runtime: Currently-active Canteen runtime. Always an instance of
:py:class:`canteen.core.runtime.Runtime` or a subclass thereof.
:param request: Object to use for ``self.request``. Usually an instance
of :py:class:`werkzeug.wrappers.Request`.
:param response: Object to use for ``self.response``. Usually an
instance of :py:class:`werkzeug.wrappers.Response`. """
# startup/assign internals
self.__runtime__, self.__environ__, self.__callback__ = (
runtime, # reference to the active runtime
environ, # reference to WSGI environment
start_response) # reference to WSGI callback
# setup HTTP/dispatch stuff
self.__status__, self.__headers__, self.__content_type__ = (
200, # default response status
      {},                          # default response headers
'text/html; charset=utf-8') # default content type
# request, response & context
self.__request__, self.__response__, self.__context__ = (
request, response, context)
# expose internals, but write-protect
routes = property(lambda self: self.__runtime__.routes)
status = property(lambda self: self.__status__)
headers = property(lambda self: self.__headers__)
content_type = property(lambda self: self.__content_type__)
# shortcuts & utilities
url_for = link = lambda self, end, **args: self.routes.build(end, args)
# WSGI internals
app = runtime = property(lambda self: self.__runtime__)
environment = environ = property(lambda self: self.__environ__)
start_response = callback = property(lambda self: self.__callback__)
# Context
session = property(lambda self: ( # session is tuple of (session, engine)
self.request.session[0] if self.request.session else None))
# Agent
agent = property(lambda self: (
self.__agent__ if self.__agent__ else (
setattr(self, '__agent__', self.http.agent.scan(self.request)) or (
self.__agent__))))
# Request & Response
request = property(lambda self: (
self.__request__ if self.__request__ else (
setattr(self, '__request__', self.http.new_request(self.__environ__)) or (
self.__request__))))
response = property(lambda self: (
self.__response__ if self.__response__ else (
setattr(self, '__response__', self.http.new_response()) or (
self.__response__))))
@property
def template_context(self):
""" Generate template context to be used in rendering source templates. The
``template_context`` accessor is expected to return a ``dict`` of
``name=>value`` pairs to present to the template API.
:returns: ``dict`` of template context. """
# for javascript context
from canteen.rpc import ServiceHandler
return {
# Default Context
'handler': self,
'config': getattr(self, 'config', {}),
'runtime': self.runtime,
# HTTP Context
'http': {
'agent': getattr(self, 'agent', None),
'request': self.request,
'response': self.response
},
# WSGI internals
'wsgi': {
'environ': self.environ,
'callback': self.callback,
'start_response': self.start_response
},
# Cache API
'cache': {
'get': self.cache.get,
'get_multi': self.cache.get_multi,
'set': self.cache.set,
'set_multi': self.cache.set_multi,
'delete': self.cache.delete,
'delete_multi': self.cache.delete_multi,
'clear': self.cache.clear,
'flush': self.cache.flush
},
# Assets API
'asset': {
'image': self.assets.image_url,
'style': self.assets.style_url,
'script': self.assets.script_url
},
# Service API
'services': {
'list': ServiceHandler.services,
'describe': ServiceHandler.describe
},
# Output API
'output': {
'render': self.template.render,
'environment': self.template.environment
},
# Routing
'link': self.url_for,
'route': {
'build': self.url_for,
'resolve': self.http.resolve_route
}
}
def respond(self, content=None, direct=False):
""" Respond to this ``Handler``'s request with raw ``str`` or ``unicode``
content. UTF-8 encoding happens if necessary.
:param content: Content to respond to. Must be ``str``, ``unicode``, or
a similar string buffer object.
:param direct: Flag indicating that ``self`` should be returned, rather
than ``self.response``. Bool, defaults to ``False`` as this
technically breaks WSGI.
:returns: Generated (filled-in) ``self.response`` object. """
# today is a good day
if not self.status: self.__status__ = 200
if content: self.response.response = content
# set status code and return
return setattr(self.response,
('status_code' if isinstance(self.status, int) else 'status'),
self.status) or (
(i.encode('utf-8').strip() for i in self.response.response),
self.response) if not direct else self
def render(self, template,
headers=None,
content_type='text/html; charset=utf-8',
context=None,
_direct=False, **kwargs):
""" Render a source ``template`` for the purpose of responding to this
``Handler``'s request, given ``context`` and proper ``headers`` for
return.
``kwargs`` are taken as extra template context and overlayed onto
``context`` before render.
:param template: Path to template file to serve. ``str`` or ``unicode``
file path.
:param headers: Extra headers to send with response. ``dict`` or iter of
``(name, value)`` tuples.
:param content_type: Value to send for ``Content-Type`` header. ``str``,
defaults to ``text/html; charset=utf-8``.
:param context: Extra template context to include during render.
``dict`` of items, with keys as names that values are bound to in the
resulting template context.
:param _direct: Flag indicating that ``self`` should be returned, rather
than ``self.response``. Bool, defaults to ``False`` as this
technically breaks WSGI.
:param kwargs: Additional items to add to the template context.
Overrides all other sources of context.
:returns: Rendered template content, added to ``self.response``. """
from canteen.util import config
# set mime type
if content_type: self.response.mimetype = content_type
# collapse and merge HTTP headers (base headers first)
self.response.headers.extend(itertools.chain(
iter(self.http.base_headers),
self.config.get('http', {}).get('headers', {}).iteritems(),
self.headers.iteritems(),
(headers or {}).iteritems()))
# merge template context
_merged_context = dict(itertools.chain(*(i.iteritems() for i in (
self.template.base_context,
self.template_context,
context or {},
kwargs))))
# render template and set as response data
self.response.response, self.response.direct_passthrough = (
self.template.render(
self,
getattr(self.runtime, 'config', None) or config.Config(),
template,
_merged_context)), True
return self.respond(direct=_direct)
def dispatch(self, **url_args):
""" Dispatch a WSGI request through this ``Handler``. Expected to be an
HTTP-style (cyclical) dispatch flow.
:param url_args: Arguments provided from the URI that should be passed
along to any resulting handler calls.
:returns: After filling the local response object (at ``self.response``)
returns it for inspection or reply. """
self.__response__ = (
getattr(self, self.request.method)(**url_args)) or self.__response__
return self.__response__
def __call__(self, url_args, direct=False):
""" Kick off the local response dispatch process, and run any necessary
pre/post hooks (named ``prepare`` and ``destroy``, respectively).
:param url_args: Arguments parsed from URL according to matched route.
``dict`` of ``{param: value}`` pairs.
:param direct: Flag to indicate 'direct' mode, whereby a handler is
returned instead of a response. Bool, defaults to ``False``, as this
technically breaks WSGI.
:returns: ``self.response`` if ``direct`` mode is not active, otherwise
``self`` for chainability. """
# run prepare hook, if specified
if hasattr(self, 'prepare'): self.prepare(url_args, direct=direct)
self.dispatch(**url_args) # dispatch local handler, fills `__response__`
# run destroy hook, if specified
if hasattr(self, 'destroy'): self.destroy(self.__response__)
return self.__response__ if not direct else self
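# Illustrative sketch (not part of the original source): ``dispatch`` resolves
# the HTTP verb via ``getattr(self, self.request.method)``, so a concrete
# handler only needs to define methods named after the verbs it supports. The
# route and template names below are hypothetical.
#
#   class Landing(Handler):
#
#     def GET(self):
#       return self.render('landing.html', greeting='hi there')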
# noinspection PyUnresolvedReferences
class RealtimeHandler(Handler):
""" Provides structure for an acyclically-dispatched web handler, meant for
use in scenarios like WebSockets. Instead of handling things with
methods like ``GET`` or ``POST``, a ``RealtimeHandler`` can specify
hooks for two events - ``on_connect`` and ``on_message``.
The first, ``on_connect``, is dispatched when a realtime connection has
just been successfully negotiated. It is executed once the application
is ready to return an ``HTTP/1.1 Upgrade`` response, so that the
developer has a chance to specify subprotocols/extensions/etc.
The second hook, ``on_message``, is dispatched each time an established
connection receives a message from the client. It takes two parameters -
the ``message`` itself and whether it is ``binary`` or not. """
__socket__ = None # space for currently-active realtime socket
def dispatch(self, **url_args): # pragma: no cover
""" Adapt regular handler dispatch to support an acyclic/realtime-style
dispatch scheme. Accepts same arguments as ``super`` definition, but
dispatches *realtime*-style messages like ``on_connect`` and
``on_message``, so long as the request looks like a WebSocket upgrade.
:param url_args: Arguments provided from the URI that should be passed
along to any resulting handler calls.
:returns: After filling the local response object (at ``self.response``)
returns it for inspection or reply. """
# fallback to standard dispatch
if self.realtime.hint not in self.environ:
return super(RealtimeHandler, self).dispatch(**url_args)
try:
# websocket upgrade and session
self.__socket__ = self.realtime.on_connect(self)
self.realtime.on_message(self, self.__socket__)
except NotImplementedError:
return self.error(400) # raised when a non-websocket handler is hit
@staticmethod
def terminate(graceful=True): # pragma: no cover
""" Terminate the currently-active ``RealtimeSocket`` communication
channel.
:param graceful: ``bool`` parameter, whether to end the connection
gracefully or not.
:returns: ``TERMINATE`` sentinel, to be yielded so the connection can be
terminated. """
from canteen.logic import realtime
if graceful: return realtime.TERMINATE
raise realtime.TerminateSocket(graceful=False)
@staticmethod
def on_connect(): # pragma: no cover
""" Hook function that is dispatched upon successful handshake for a
realtime-style connection between a client and this server. Local
handler should be prepared by this point with all information necessary
to satisfy messages.
Implementors are expected to provide a method that makes use of object-
level context (i.e. not a static or classmethod).
:returns: ``NotImplemented`` by default, which simply indicates that
the implementor elects not to run code ``on_connect``. """
return NotImplemented
def on_message(self, message, binary): # pragma: no cover
""" Hook that is dispatched per message sent from a live client. Called
subsequent to a connection being properly established from a previous
call to ``on_connect``.
:param message: WebSocket message passed from the client.
:param binary: ``bool`` flag - ``True`` if ``message`` is binary,
``False`` otherwise.
:raises NotImplementedError: By default, since not many people use
WebSockets and there's no such thing as a ``400`` without HTTP. :)
:returns: Not expected to return anything. If a return is used, any
value or iterable of values will be collapsed and sent to the client.
Optionally, the developer may implement ``on_message`` as a coroutine-
style Python generator, in which case new messages will be ``sent``
in from the client and messages to the client can be yielded upwards
to be sent. """
raise NotImplementedError('Handler "%s" fails to implement hook'
' `on_message` so it does not support'
' realtime-style communications.' % repr(self))
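  # Illustrative sketch of the generator form described above (the payloads and
  # control flow are hypothetical, not part of this module):
  #
  #   def on_message(self, message, binary):
  #     while True:
  #       # yield a reply to the client, receive the next inbound message
  #       message = yield 'ack: %s' % message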
# noinspection PyUnusedLocal
@staticmethod
def on_close(graceful): # pragma: no cover
""" Hook function that is dispatched upon closure of an existing realtime
communications session.
:param graceful: ``bool`` parameter indicating whether the connection
was closed gracefully (i.e. electively) or because of some error
condition.
:returns: ``NotImplemented`` by default, which simply indicates that
the implementor elects not to run code ``on_connect``. """
return NotImplemented
__all__ = ('Handler',)
| momentum/canteen | canteen/base/handler.py | Python | mit | 17,022 |
# -*-coding: utf-8 -*-
import math
import numpy as np
import relations
def _avg_difference(npiece, side):
if side == relations.LEFT:
difference = npiece[:, 0] - npiece[:, 1]
elif side == relations.RIGHT:
difference = npiece[:, -1] - npiece[:, -2]
elif side == relations.UP:
difference = npiece[0, :] - npiece[1, :]
else:
difference = npiece[-1, :] - npiece[-2, :]
return sum(difference)/float(len(difference))
def _gradient(pieces_difference, average_side_difference):
grad = pieces_difference - average_side_difference
grad_t = np.transpose(grad)
cov = np.cov(grad_t)
try:
cov_inv = np.linalg.inv(cov)
except np.linalg.LinAlgError as e:
cov_inv = np.ones((3, 3))
return grad.dot(cov_inv).dot(grad_t)
def mgc(np1, np2, relation):
if relation == relations.LEFT:
grad_12 = _gradient(np2[:, 0] - np1[:, -1], _avg_difference(np1, relations.RIGHT))
grad_21 = _gradient(np1[:, -1] - np2[:, 0], _avg_difference(np2, relations.LEFT))
else:
grad_12 = _gradient(np2[0, :] - np1[-1, :], _avg_difference(np1, relations.DOWN))
grad_21 = _gradient(np1[-1, :] - np2[0, :], _avg_difference(np2, relations.UP))
return np.sum(grad_12 + grad_21)
def rgb(np1, np2, relation):
if relation == relations.LEFT:
difference = np1[:, -1] - np2[:, 0]
else:
difference = np1[-1, :] - np2[0, :]
exponent = np.vectorize(lambda x: math.pow(x, 2))
dissimilarity = np.sum(exponent(difference))
return math.sqrt(dissimilarity)
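# Illustrative sketch (not part of the module): both measures expect two pieces
# as (H, W, 3) float arrays plus a side constant from the local ``relations``
# module; the shapes and values below are arbitrary.
#
#   import numpy as np
#   a = np.random.rand(32, 32, 3)
#   b = np.random.rand(32, 32, 3)
#   rgb(a, b, relations.LEFT)   # squared-difference edge dissimilarity
#   mgc(a, b, relations.LEFT)   # gradient-covariance (MGC) dissimilarity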
def rgb_mgc(*args):
    return rgb(*args)*mgc(*args)
| typeinference/jigsaw-puzzle | src/measures.py | Python | mit | 1,631 |
import RPi.GPIO as GPIO
import os
import time
import datetime
import glob
import MySQLdb
from time import strftime
import serial
ser = serial.Serial(
port='/dev/ttyACM0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
counter = 0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(15,GPIO.IN)
# Variables for MySQL
db = MySQLdb.connect(host="localhost", user="root",passwd="deb280794", db="temp_database")
cur = db.cursor()
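# Assumed serial line format (not verified against the Arduino sketch): one
# whitespace-separated reading per line with the moisture, humidity and
# temperature values at token indexes 1, 4 and 7, e.g.
#   "Moisture: 512 % Humidity: 40.2 % Temperature: 25.6 C"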
while True:
x = ser.readline()
f=x.split()
moisture = f[1]
humidity = f[4]
temp = f[7]
print("Moisture: ")
print moisture
print("Humidity: ")
print humidity
print("Temperature: ")
print temp
datetimeWrite = (time.strftime("%Y-%m-%d ") + time.strftime("%H:%M:%S"))
sql = ("""INSERT INTO tempLog (datetime,temperature,humidity,moisture) VALUES (%s,%s,%s,%s)""",(datetimeWrite,temp,humidity,moisture))
try:
print "Writing to database..."
# Execute the SQL command
cur.execute(*sql)
# Commit your changes in the database
db.commit()
print "Write Complete"
except:
# Rollback in case there is any error
db.rollback()
print "Failed writing to database"
time.sleep(0.5)
| Shashank95/Intellifarm | logdata.py | Python | mit | 1,310 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning. See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedClassifier
@@LinearClassifier
@@LinearRegressor
@@LogisticRegressor
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@ProblemType
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/tensorflow/contrib/learn/__init__.py | Python | mit | 2,083 |
from __future__ import division
class ExitNode:
"""
Class for the exit node on our network
"""
def __init__(self, max_simulation_time):
"""
Initialise a node.
"""
self.individuals = []
self.id_number = -1
self.next_event_date = max_simulation_time
self.node_capacity = "Inf"
def __repr__(self):
"""
Representation of a node::
"""
return 'Exit Node'
def accept(self, next_individual, current_time):
"""
Accepts a new customer to the queue
"""
self.individuals.append(next_individual)
def update_next_event_date(self):
"""
Finds the time of the next event at this node
"""
        pass
| geraintpalmer/ASQ | asq/exit_node.py | Python | mit | 759 |
### EXESOFT PYIGNITION ###
# Copyright David Barker 2010
#
# Global constants module
# Which version is this?
PYIGNITION_VERSION = 1.0
# Drawtype constants
DRAWTYPE_POINT = 100
DRAWTYPE_CIRCLE = 101
DRAWTYPE_LINE = 102
DRAWTYPE_SCALELINE = 103
DRAWTYPE_BUBBLE = 104
DRAWTYPE_IMAGE = 105
# Interpolation type constants
INTERPOLATIONTYPE_LINEAR = 200
INTERPOLATIONTYPE_COSINE = 201
# Gravity constants
UNIVERSAL_CONSTANT_OF_MAKE_GRAVITY_LESS_STUPIDLY_SMALL = 1000.0 # Well, Newton got one to make it less stupidly large.
VORTEX_ACCELERATION = 0.01 # A tiny value added to the centripetal force exerted by vortex gravities to draw in particles
VORTEX_SWALLOWDIST = 20.0 # Particles closer than this will be swallowed up and regurgitated in the bit bucket
# Fraction of radius which can go inside an object
RADIUS_PERMITTIVITY = 0.3
| HelsinkiGroup5/Hackathon | display_manuel/constants.py | Python | mit | 865 |
import json
import math
import random
import os
class KMeans(object):
# TO-DO: Richard
def __init__(self, dataset=None):
file_path = os.path.dirname(os.path.realpath(__file__))
if dataset is None:
self.mega_dataset = json.loads(open(file_path + '/dataset.json', 'r').read())
else:
self.mega_dataset = json.loads(dataset)
def _ED(self, point1, point2):
result = 0
for i in xrange(len(point1)):
result += pow(point2[i] - point1[i], 2)
return math.sqrt(result)
def _closest(self, datum, centroids):
closest_index = None
closest_distance = None
for i, point in enumerate(centroids):
dist = self._ED(datum, point)
if closest_index is None or dist < closest_distance:
closest_index = i
closest_distance = dist
return closest_index
def _avg(self, li):
return sum(li) / float(len(li))
def _get_centroid(self, data):
try:
datum_len = range(len(next(iter(data))))
result = [0 for x in datum_len]
for datum in data:
for i, value in enumerate(datum):
result[i] += value
for i in datum_len:
result[i] /= float(len(data))
return tuple(result)
except StopIteration:
return ([0, 0, 0])
def _kmeans(self, k, iterations=100):
clusters = [set() for _ in xrange(k)]
centroids = random.sample(self.dataset, k)
# init data to clusters
for datum in self.dataset:
i = random.choice(range(k))
clusters[i].add(datum)
for _ in xrange(iterations):
for datum in self.dataset:
# remove from clusters
for c in clusters:
try:
c.remove(datum)
except KeyError:
pass
# get closest centroid index
closest_index = self._closest(datum, centroids)
# add to the new cluster
clusters[closest_index].add(datum)
# update centroids
centroids = [self._get_centroid(c) for c in clusters]
return clusters, centroids
def calculate(self, attr, to_file=False):
self.dataset = []
for data in self.mega_dataset[attr]:
self.dataset.append(tuple(data))
self.dataset = set(self.dataset)
champ2stat = {}
for i in xrange(len(self.mega_dataset['champions'])):
champ2stat[tuple(self.mega_dataset[attr][i])] = self.mega_dataset['champions'][i]
clusters, centroids = self._kmeans(len(self.mega_dataset[attr][0]), 100)
champ2cluster = []
for i, c in enumerate(clusters):
new_c = []
champ2cluster.append(new_c)
new_c.append(tuple(centroids[i]))
for champ in c:
new_c.append(champ2stat[champ])
if to_file:
f = open('output/' + attr + '_output.json', 'w')
f.write(json.dumps(champ2cluster, indent=4))
f.close()
return champ2cluster
# Example:
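# (hypothetical usage - the attribute name is an assumption about dataset.json)
#   km = KMeans()  # reads dataset.json from this module's directory
#   clusters = km.calculate('stats', to_file=True)  # also writes output/stats_output.json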
| avathardev/matchine-learning | kmeans.py | Python | mit | 3,350 |
#!/usr/bin/env python
# coding=utf-8
"""
The full documentation is at https://python_hangman.readthedocs.org.
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
import sys
errno = pytest.main(self.test_args)
sys.exit(errno)
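# With the cmdclass hook registered in setup() below, ``python setup.py test``
# runs the suite through pytest via this class.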
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['click', 'future']
test_requirements = ['pytest', 'mock']
setup( # :off
name='python_hangman',
version='2.2.2',
description='Python Hangman TDD/MVC demonstration.',
long_description='\n\n'.join([readme, history]),
author='Manu Phatak',
author_email='[email protected]',
url='https://github.com/bionikspoon/python_hangman',
packages=['hangman',],
package_dir={'hangman':'hangman'},
include_package_data=True,
install_requires=requirements,
license='MIT',
zip_safe=False,
use_2to3=True,
cmdclass={'test': PyTest},
keywords='python_hangman Manu Phatak',
entry_points={'console_scripts': ['hangman = hangman.__main__:cli']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Games/Entertainment :: Puzzle Games',
'Topic :: Terminals',
],
test_suite='tests',
tests_require=test_requirements
) # :on
| bionikspoon/python_hangman | setup.py | Python | mit | 2,404 |
# encoding: utf-8
from mongoengine.fields import BaseField
from marrow.package.canonical import name
from marrow.package.loader import load
class PythonReferenceField(BaseField):
"""A field that transforms a callable into a string reference using marrow.package on assignment, then back to the
callable when accessing."""
def to_python(self, value):
if callable(value):
return value
return load(value)
def to_mongo(self, value):
return name(value)
def validate(self, value, clean=True):
if not callable(value):
self.error('Only callables may be used in a %s' % self.__class__.__name__)
def prepare_query_value(self, op, value):
if not callable(value):
return value
        return name(value)
| deKross/task | marrow/task/field.py | Python | mit | 717 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/player/shared_player_city_bank_corellia_style_01.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| anhstudios/swganh | data/scripts/templates/object/building/player/shared_player_city_bank_corellia_style_01.py | Python | mit | 446 |
__author__ = 'leif'
from django.contrib import admin
from models import *
admin.site.register(GameExperiment)
admin.site.register(UserProfile)
admin.site.register(MaxHighScore)
| leifos/boxes | treasure-houses/asg/admin.py | Python | mit | 178 |
#! coding:utf-8
"""
compiler tests.
These tests are among the very first that were written when SQLAlchemy
began in 2005. As a result the testing style here is very dense;
it's an ongoing job to break these into much smaller tests with correct pep8
styling and coherent test organization.
"""
from sqlalchemy.testing import eq_, is_, assert_raises, \
assert_raises_message, eq_ignore_whitespace
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import Integer, String, MetaData, Table, Column, select, \
func, not_, cast, text, tuple_, exists, update, bindparam,\
literal, and_, null, type_coerce, alias, or_, literal_column,\
Float, TIMESTAMP, Numeric, Date, Text, union, except_,\
intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\
over, subquery, case, true, CheckConstraint
import decimal
from sqlalchemy.util import u
from sqlalchemy import exc, sql, util, types, schema
from sqlalchemy.sql import table, column, label
from sqlalchemy.sql.expression import ClauseList, _literal_as_text, HasPrefixes
from sqlalchemy.engine import default
from sqlalchemy.dialects import mysql, mssql, postgresql, oracle, \
sqlite, sybase
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import compiler
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
metadata = MetaData()
# table with a schema
table4 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='remote_owner'
)
# table with a 'multipart' schema
table5 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='dbo.remote_owner'
)
users = table('users',
column('user_id'),
column('user_name'),
column('password'),
)
addresses = table('addresses',
column('address_id'),
column('user_id'),
column('street'),
column('city'),
column('state'),
column('zip')
)
keyed = Table('keyed', metadata,
Column('x', Integer, key='colx'),
Column('y', Integer, key='coly'),
Column('z', Integer),
)
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_attribute_sanity(self):
assert hasattr(table1, 'c')
assert hasattr(table1.select(), 'c')
assert not hasattr(table1.c.myid.self_group(), 'columns')
assert hasattr(table1.select().self_group(), 'columns')
assert not hasattr(table1.c.myid, 'columns')
assert not hasattr(table1.c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'c')
if util.compat.py32:
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns'))
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(select([table1.c.myid]).as_scalar(),
'columns'))
else:
assert not hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns')
assert not hasattr(select([table1.c.myid]).as_scalar(), 'columns')
def test_prefix_constructor(self):
class Pref(HasPrefixes):
def _generate(self):
return self
assert_raises(exc.ArgumentError,
Pref().prefix_with,
"some prefix", not_a_dialect=True
)
def test_table_select(self):
self.assert_compile(table1.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable")
self.assert_compile(
select(
[
table1,
table2]),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable")
def test_invalid_col_argument(self):
assert_raises(exc.ArgumentError, select, table1)
assert_raises(exc.ArgumentError, select, table1.c.myid)
def test_int_limit_offset_coercion(self):
for given, exp in [
("5", 5),
(5, 5),
(5.2, 5),
(decimal.Decimal("5"), 5),
(None, None),
]:
eq_(select().limit(given)._limit, exp)
eq_(select().offset(given)._offset, exp)
eq_(select(limit=given)._limit, exp)
eq_(select(offset=given)._offset, exp)
assert_raises(ValueError, select().limit, "foo")
assert_raises(ValueError, select().offset, "foo")
assert_raises(ValueError, select, offset="foo")
assert_raises(ValueError, select, limit="foo")
def test_limit_offset_no_int_coercion_one(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
self.assert_compile(
select([1]).limit(exp1).offset(exp2),
"SELECT 1 LIMIT Q OFFSET Y"
)
self.assert_compile(
select([1]).limit(bindparam('x')).offset(bindparam('y')),
"SELECT 1 LIMIT :x OFFSET :y"
)
def test_limit_offset_no_int_coercion_two(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
sel = select([1]).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr, sel, "_limit"
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr, sel, "_offset"
)
def test_limit_offset_no_int_coercion_three(self):
exp1 = bindparam("Q")
exp2 = bindparam("Y")
sel = select([1]).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr, sel, "_limit"
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr, sel, "_offset"
)
def test_limit_offset(self):
for lim, offset, exp, params in [
(5, 10, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}),
(None, 10, "LIMIT -1 OFFSET :param_1", {'param_1': 10}),
(5, None, "LIMIT :param_1", {'param_1': 5}),
(0, 0, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 0, 'param_2': 0}),
]:
self.assert_compile(
select([1]).limit(lim).offset(offset),
"SELECT 1 " + exp,
checkparams=params
)
def test_limit_offset_select_literal_binds(self):
stmt = select([1]).limit(5).offset(6)
self.assert_compile(
stmt,
"SELECT 1 LIMIT 5 OFFSET 6",
literal_binds=True
)
def test_limit_offset_compound_select_literal_binds(self):
stmt = select([1]).union(select([2])).limit(5).offset(6)
self.assert_compile(
stmt,
"SELECT 1 UNION SELECT 2 LIMIT 5 OFFSET 6",
literal_binds=True
)
def test_select_precol_compile_ordering(self):
s1 = select([column('x')]).select_from(text('a')).limit(5).as_scalar()
s2 = select([s1]).limit(10)
class MyCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select, **kw):
result = ""
if select._limit:
result += "FIRST %s " % self.process(
literal(
select._limit), **kw)
if select._offset:
result += "SKIP %s " % self.process(
literal(
select._offset), **kw)
return result
def limit_clause(self, select, **kw):
return ""
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
dialect.paramstyle = 'qmark'
dialect.positional = True
self.assert_compile(
s2,
"SELECT FIRST ? (SELECT FIRST ? x FROM a) AS anon_1",
checkpositional=(10, 5),
dialect=dialect
)
def test_from_subquery(self):
"""tests placing select statements in the column clause of
another select, for the
purposes of selecting from the exported columns of that select."""
s = select([table1], table1.c.name == 'jack')
self.assert_compile(
select(
[s],
s.c.myid == 7),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description AS description "
"FROM mytable "
"WHERE mytable.name = :name_1) WHERE myid = :myid_1")
sq = select([table1])
self.assert_compile(
sq.select(),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable)"
)
sq = select(
[table1],
).alias('sq')
self.assert_compile(
sq.select(sq.c.myid == 7),
"SELECT sq.myid, sq.name, sq.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS sq "
"WHERE sq.myid = :myid_1"
)
sq = select(
[table1, table2],
and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid),
use_labels=True
).alias('sq')
sqstring = "SELECT mytable.myid AS mytable_myid, mytable.name AS "\
"mytable_name, mytable.description AS mytable_description, "\
"myothertable.otherid AS myothertable_otherid, "\
"myothertable.othername AS myothertable_othername FROM "\
"mytable, myothertable WHERE mytable.myid = :myid_1 AND "\
"myothertable.otherid = mytable.myid"
self.assert_compile(
sq.select(),
"SELECT sq.mytable_myid, sq.mytable_name, "
"sq.mytable_description, sq.myothertable_otherid, "
"sq.myothertable_othername FROM (%s) AS sq" % sqstring)
sq2 = select(
[sq],
use_labels=True
).alias('sq2')
self.assert_compile(
sq2.select(),
"SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, "
"sq2.sq_mytable_description, sq2.sq_myothertable_otherid, "
"sq2.sq_myothertable_othername FROM "
"(SELECT sq.mytable_myid AS "
"sq_mytable_myid, sq.mytable_name AS sq_mytable_name, "
"sq.mytable_description AS sq_mytable_description, "
"sq.myothertable_otherid AS sq_myothertable_otherid, "
"sq.myothertable_othername AS sq_myothertable_othername "
"FROM (%s) AS sq) AS sq2" % sqstring)
def test_select_from_clauselist(self):
self.assert_compile(
select([ClauseList(column('a'), column('b'))]
).select_from(text('sometable')),
'SELECT a, b FROM sometable'
)
def test_use_labels(self):
self.assert_compile(
select([table1.c.myid == 5], use_labels=True),
"SELECT mytable.myid = :myid_1 AS anon_1 FROM mytable"
)
self.assert_compile(
select([func.foo()], use_labels=True),
"SELECT foo() AS foo_1"
)
# this is native_boolean=False for default dialect
self.assert_compile(
select([not_(True)], use_labels=True),
"SELECT :param_1 = 0 AS anon_1"
)
self.assert_compile(
select([cast("data", Integer)], use_labels=True),
"SELECT CAST(:param_1 AS INTEGER) AS anon_1"
)
self.assert_compile(
select([func.sum(
func.lala(table1.c.myid).label('foo')).label('bar')]),
"SELECT sum(lala(mytable.myid)) AS bar FROM mytable"
)
self.assert_compile(
select([keyed]),
"SELECT keyed.x, keyed.y"
", keyed.z FROM keyed"
)
self.assert_compile(
select([keyed]).apply_labels(),
"SELECT keyed.x AS keyed_x, keyed.y AS "
"keyed_y, keyed.z AS keyed_z FROM keyed"
)
def test_paramstyles(self):
stmt = text("select :foo, :bar, :bat from sometable")
self.assert_compile(
stmt,
"select ?, ?, ? from sometable",
dialect=default.DefaultDialect(paramstyle='qmark')
)
self.assert_compile(
stmt,
"select :foo, :bar, :bat from sometable",
dialect=default.DefaultDialect(paramstyle='named')
)
self.assert_compile(
stmt,
"select %s, %s, %s from sometable",
dialect=default.DefaultDialect(paramstyle='format')
)
self.assert_compile(
stmt,
"select :1, :2, :3 from sometable",
dialect=default.DefaultDialect(paramstyle='numeric')
)
self.assert_compile(
stmt,
"select %(foo)s, %(bar)s, %(bat)s from sometable",
dialect=default.DefaultDialect(paramstyle='pyformat')
)
def test_anon_param_name_on_keys(self):
self.assert_compile(
keyed.insert(),
"INSERT INTO keyed (x, y, z) VALUES (%(colx)s, %(coly)s, %(z)s)",
dialect=default.DefaultDialect(paramstyle='pyformat')
)
self.assert_compile(
keyed.c.coly == 5,
"keyed.y = %(coly_1)s",
checkparams={'coly_1': 5},
dialect=default.DefaultDialect(paramstyle='pyformat')
)
def test_dupe_columns(self):
"""test that deduping is performed against clause
element identity, not rendered result."""
self.assert_compile(
select([column('a'), column('a'), column('a')]),
"SELECT a, a, a", dialect=default.DefaultDialect()
)
c = column('a')
self.assert_compile(
select([c, c, c]),
"SELECT a", dialect=default.DefaultDialect()
)
a, b = column('a'), column('b')
self.assert_compile(
select([a, b, b, b, a, a]),
"SELECT a, b", dialect=default.DefaultDialect()
)
# using alternate keys.
a, b, c = Column('a', Integer, key='b'), \
Column('b', Integer), \
Column('c', Integer, key='a')
self.assert_compile(
select([a, b, c, a, b, c]),
"SELECT a, b, c", dialect=default.DefaultDialect()
)
self.assert_compile(
select([bindparam('a'), bindparam('b'), bindparam('c')]),
"SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
dialect=default.DefaultDialect(paramstyle='named')
)
self.assert_compile(
select([bindparam('a'), bindparam('b'), bindparam('c')]),
"SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
dialect=default.DefaultDialect(paramstyle='qmark'),
)
self.assert_compile(
select([column("a"), column("a"), column("a")]),
"SELECT a, a, a"
)
s = select([bindparam('a'), bindparam('b'), bindparam('c')])
s = s.compile(dialect=default.DefaultDialect(paramstyle='qmark'))
eq_(s.positiontup, ['a', 'b', 'c'])
def test_nested_label_targeting(self):
"""test nested anonymous label generation.
"""
s1 = table1.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(s5,
'SELECT anon_1.anon_2_myid AS '
'anon_1_anon_2_myid, anon_1.anon_2_name AS '
'anon_1_anon_2_name, anon_1.anon_2_descript'
'ion AS anon_1_anon_2_description FROM '
'(SELECT anon_2.myid AS anon_2_myid, '
'anon_2.name AS anon_2_name, '
'anon_2.description AS anon_2_description '
'FROM (SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description FROM mytable) AS anon_2) '
'AS anon_1')
def test_nested_label_targeting_keyed(self):
s1 = keyed.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
self.assert_compile(s3,
"SELECT anon_1.x AS anon_1_x, "
"anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM "
"(SELECT keyed.x AS x, keyed.y "
"AS y, keyed.z AS z FROM keyed) AS anon_1")
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(s5,
"SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
"anon_1.anon_2_y AS anon_1_anon_2_y, "
"anon_1.anon_2_z AS anon_1_anon_2_z "
"FROM (SELECT anon_2.x AS anon_2_x, "
"anon_2.y AS anon_2_y, "
"anon_2.z AS anon_2_z FROM "
"(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
"AS z FROM keyed) AS anon_2) AS anon_1"
)
def test_exists(self):
s = select([table1.c.myid]).where(table1.c.myid == 5)
self.assert_compile(exists(s),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)"
)
self.assert_compile(exists(s.as_scalar()),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)"
)
self.assert_compile(exists([table1.c.myid], table1.c.myid
== 5).select(),
'SELECT EXISTS (SELECT mytable.myid FROM '
'mytable WHERE mytable.myid = :myid_1) AS anon_1',
params={'mytable_myid': 5})
self.assert_compile(select([table1, exists([1],
from_obj=table2)]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, EXISTS (SELECT 1 '
'FROM myothertable) AS anon_1 FROM mytable',
params={})
self.assert_compile(select([table1,
exists([1],
from_obj=table2).label('foo')]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, EXISTS (SELECT 1 '
'FROM myothertable) AS foo FROM mytable',
params={})
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)
).replace_selectable(
table2,
table2.alias()),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable AS '
'myothertable_1 WHERE myothertable_1.otheri'
'd = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)).
select_from(
table1.join(
table2,
table1.c.myid == table2.c.otherid)).
replace_selectable(
table2,
table2.alias()),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable JOIN '
'myothertable AS myothertable_1 ON '
'mytable.myid = myothertable_1.otherid '
'WHERE EXISTS (SELECT * FROM myothertable '
'AS myothertable_1 WHERE '
'myothertable_1.otherid = mytable.myid)')
self.assert_compile(
select([
or_(
exists().where(table2.c.otherid == 'foo'),
exists().where(table2.c.otherid == 'bar')
)
]),
"SELECT (EXISTS (SELECT * FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)) "
"OR (EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = :otherid_2)) AS anon_1"
)
self.assert_compile(
select([exists([1])]),
"SELECT EXISTS (SELECT 1) AS anon_1"
)
self.assert_compile(
select([~exists([1])]),
"SELECT NOT (EXISTS (SELECT 1)) AS anon_1"
)
self.assert_compile(
select([~(~exists([1]))]),
"SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1"
)
def test_where_subquery(self):
s = select([addresses.c.street], addresses.c.user_id
== users.c.user_id, correlate=True).alias('s')
# don't correlate in a FROM list
self.assert_compile(select([users, s.c.street], from_obj=s),
"SELECT users.user_id, users.user_name, "
"users.password, s.street FROM users, "
"(SELECT addresses.street AS street FROM "
"addresses, users WHERE addresses.user_id = "
"users.user_id) AS s")
self.assert_compile(table1.select(
table1.c.myid == select(
[table1.c.myid],
table1.c.name == 'jack')),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'mytable.myid = (SELECT mytable.myid FROM '
'mytable WHERE mytable.name = :name_1)')
self.assert_compile(
table1.select(
table1.c.myid == select(
[table2.c.otherid],
table1.c.name == table2.c.othername
)
),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'mytable.myid = (SELECT '
'myothertable.otherid FROM myothertable '
'WHERE mytable.name = myothertable.othernam'
'e)')
self.assert_compile(table1.select(exists([1], table2.c.otherid
== table1.c.myid)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT 1 FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
talias = table1.alias('ta')
s = subquery('sq2', [talias], exists([1], table2.c.otherid
== talias.c.myid))
self.assert_compile(select([s, table1]),
'SELECT sq2.myid, sq2.name, '
'sq2.description, mytable.myid, '
'mytable.name, mytable.description FROM '
'(SELECT ta.myid AS myid, ta.name AS name, '
'ta.description AS description FROM '
'mytable AS ta WHERE EXISTS (SELECT 1 FROM '
'myothertable WHERE myothertable.otherid = '
'ta.myid)) AS sq2, mytable')
# test constructing the outer query via append_column(), which
# occurs in the ORM's Query object
s = select([], exists([1], table2.c.otherid == table1.c.myid),
from_obj=table1)
s.append_column(table1)
self.assert_compile(s,
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT 1 FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
def test_orderby_subquery(self):
self.assert_compile(
table1.select(
order_by=[
select(
[
table2.c.otherid],
table1.c.myid == table2.c.otherid)]),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable ORDER BY '
'(SELECT myothertable.otherid FROM '
'myothertable WHERE mytable.myid = '
'myothertable.otherid)')
self.assert_compile(table1.select(order_by=[
desc(select([table2.c.otherid],
table1.c.myid == table2.c.otherid))]),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable ORDER BY '
'(SELECT myothertable.otherid FROM '
'myothertable WHERE mytable.myid = '
'myothertable.otherid) DESC')
def test_scalar_select(self):
assert_raises_message(
exc.InvalidRequestError,
r"Select objects don't have a type\. Call as_scalar\(\) "
"on this Select object to return a 'scalar' "
"version of this Select\.",
func.coalesce, select([table1.c.myid])
)
s = select([table1.c.myid], correlate=False).as_scalar()
self.assert_compile(select([table1, s]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT mytable.myid '
'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
self.assert_compile(select([table2, s]),
'SELECT myothertable.otherid, '
'myothertable.othername, (SELECT '
'mytable.myid FROM mytable) AS anon_1 FROM '
'myothertable')
s = select([table1.c.myid]).correlate(None).as_scalar()
self.assert_compile(select([table1, s]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT mytable.myid '
'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
s2 = s.where(table1.c.myid == 5)
self.assert_compile(
s2,
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)"
)
self.assert_compile(
s, "(SELECT mytable.myid FROM mytable)"
)
# test that aliases use as_scalar() when used in an explicitly
# scalar context
s = select([table1.c.myid]).alias()
self.assert_compile(select([table1.c.myid]).where(table1.c.myid
== s),
'SELECT mytable.myid FROM mytable WHERE '
'mytable.myid = (SELECT mytable.myid FROM '
'mytable)')
self.assert_compile(select([table1.c.myid]).where(s
> table1.c.myid),
'SELECT mytable.myid FROM mytable WHERE '
'mytable.myid < (SELECT mytable.myid FROM '
'mytable)')
s = select([table1.c.myid]).as_scalar()
self.assert_compile(select([table2, s]),
'SELECT myothertable.otherid, '
'myothertable.othername, (SELECT '
'mytable.myid FROM mytable) AS anon_1 FROM '
'myothertable')
# test expressions against scalar selects
self.assert_compile(select([s - literal(8)]),
'SELECT (SELECT mytable.myid FROM mytable) '
'- :param_1 AS anon_1')
self.assert_compile(select([select([table1.c.name]).as_scalar()
+ literal('x')]),
'SELECT (SELECT mytable.name FROM mytable) '
'|| :param_1 AS anon_1')
self.assert_compile(select([s > literal(8)]),
'SELECT (SELECT mytable.myid FROM mytable) '
'> :param_1 AS anon_1')
self.assert_compile(select([select([table1.c.name]).label('foo'
)]),
'SELECT (SELECT mytable.name FROM mytable) '
'AS foo')
# scalar selects should not have any attributes on their 'c' or
# 'columns' attribute
s = select([table1.c.myid]).as_scalar()
try:
s.c.foo
except exc.InvalidRequestError as err:
assert str(err) \
== 'Scalar Select expression has no columns; use this '\
'object directly within a column-level expression.'
try:
s.columns.foo
except exc.InvalidRequestError as err:
assert str(err) \
== 'Scalar Select expression has no columns; use this '\
'object directly within a column-level expression.'
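        # correlated scalar subqueries used as arguments to a SQL function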
zips = table('zips',
column('zipcode'),
column('latitude'),
column('longitude'),
)
places = table('places',
column('id'),
column('nm')
)
zip = '12345'
qlat = select([zips.c.latitude], zips.c.zipcode == zip).\
correlate(None).as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zip).\
correlate(None).as_scalar()
q = select([places.c.id, places.c.nm, zips.c.zipcode,
func.latlondist(qlat, qlng).label('dist')],
zips.c.zipcode == zip,
order_by=['dist', places.c.nm]
)
self.assert_compile(q,
'SELECT places.id, places.nm, '
'zips.zipcode, latlondist((SELECT '
'zips.latitude FROM zips WHERE '
'zips.zipcode = :zipcode_1), (SELECT '
'zips.longitude FROM zips WHERE '
'zips.zipcode = :zipcode_2)) AS dist FROM '
'places, zips WHERE zips.zipcode = '
':zipcode_3 ORDER BY dist, places.nm')
zalias = zips.alias('main_zip')
qlat = select([zips.c.latitude], zips.c.zipcode == zalias.c.zipcode).\
as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zalias.c.zipcode).\
as_scalar()
q = select([places.c.id, places.c.nm, zalias.c.zipcode,
func.latlondist(qlat, qlng).label('dist')],
order_by=['dist', places.c.nm])
self.assert_compile(q,
'SELECT places.id, places.nm, '
'main_zip.zipcode, latlondist((SELECT '
'zips.latitude FROM zips WHERE '
'zips.zipcode = main_zip.zipcode), (SELECT '
'zips.longitude FROM zips WHERE '
'zips.zipcode = main_zip.zipcode)) AS dist '
'FROM places, zips AS main_zip ORDER BY '
'dist, places.nm')
a1 = table2.alias('t2alias')
s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar()
j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
s2 = select([table1, s1], from_obj=j1)
self.assert_compile(s2,
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT '
't2alias.otherid FROM myothertable AS '
't2alias WHERE mytable.myid = '
't2alias.otherid) AS anon_1 FROM mytable '
'JOIN myothertable ON mytable.myid = '
'myothertable.otherid')
def test_label_comparison_one(self):
x = func.lala(table1.c.myid).label('foo')
self.assert_compile(select([x], x == 5),
'SELECT lala(mytable.myid) AS foo FROM '
'mytable WHERE lala(mytable.myid) = '
':param_1')
def test_label_comparison_two(self):
self.assert_compile(
label('bar', column('foo', type_=String)) + 'foo',
'foo || :param_1')
def test_order_by_labels_enabled(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
self.assert_compile(select([lab1, lab2]).order_by(lab1, desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY foo, bar DESC",
dialect=dialect
)
        # a label embedded in a function renders as the underlying expression
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), bar DESC",
dialect=dialect
)
# binary expressions render as the expression without labels
self.assert_compile(select([lab1, lab2]).order_by(lab1 + "test"),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1 + :param_1",
dialect=dialect
)
# labels within functions in the columns clause render
# with the expression
self.assert_compile(
select([lab1, func.foo(lab1)]).order_by(lab1, func.foo(lab1)),
"SELECT mytable.myid + :myid_1 AS foo, "
"foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
"ORDER BY foo, foo(mytable.myid + :myid_1)",
dialect=dialect
)
lx = (table1.c.myid + table1.c.myid).label('lx')
ly = (func.lower(table1.c.name) + table1.c.description).label('ly')
self.assert_compile(
select([lx, ly]).order_by(lx, ly.desc()),
"SELECT mytable.myid + mytable.myid AS lx, "
"lower(mytable.name) || mytable.description AS ly "
"FROM mytable ORDER BY lx, ly DESC",
dialect=dialect
)
def test_order_by_labels_disabled(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
dialect.supports_simple_order_by_label = False
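        # with simple ORDER BY label targeting disabled, the full expressions render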
self.assert_compile(
select(
[
lab1,
lab2]).order_by(
lab1,
desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
dialect=dialect)
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), "
"somefunc(mytable.name) DESC",
dialect=dialect
)
def test_no_group_by_labels(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
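        # GROUP BY renders the full expressions, never the label names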
self.assert_compile(
select([lab1, lab2]).group_by(lab1, lab2),
"SELECT mytable.myid + :myid_1 AS foo, somefunc(mytable.name) "
"AS bar FROM mytable GROUP BY mytable.myid + :myid_1, "
"somefunc(mytable.name)",
dialect=dialect
)
def test_conjunctions(self):
a, b, c = text('a'), text('b'), text('c')
x = and_(a, b, c)
assert isinstance(x.type, Boolean)
assert str(x) == 'a AND b AND c'
self.assert_compile(
select([x.label('foo')]),
'SELECT a AND b AND c AS foo'
)
self.assert_compile(
and_(table1.c.myid == 12, table1.c.name == 'asdf',
table2.c.othername == 'foo', text("sysdate() = today()")),
"mytable.myid = :myid_1 AND mytable.name = :name_1 "
"AND myothertable.othername = "
":othername_1 AND sysdate() = today()"
)
self.assert_compile(
and_(
table1.c.myid == 12,
or_(table2.c.othername == 'asdf',
table2.c.othername == 'foo', table2.c.otherid == 9),
text("sysdate() = today()"),
),
'mytable.myid = :myid_1 AND (myothertable.othername = '
':othername_1 OR myothertable.othername = :othername_2 OR '
'myothertable.otherid = :otherid_1) AND sysdate() = '
'today()',
checkparams={'othername_1': 'asdf', 'othername_2': 'foo',
'otherid_1': 9, 'myid_1': 12}
)
# test a generator
self.assert_compile(
and_(
conj for conj in [
table1.c.myid == 12,
table1.c.name == 'asdf'
]
),
"mytable.myid = :myid_1 AND mytable.name = :name_1"
)
def test_nested_conjunctions_short_circuit(self):
"""test that empty or_(), and_() conjunctions are collapsed by
an enclosing conjunction."""
t = table('t', column('x'))
self.assert_compile(
select([t]).where(and_(t.c.x == 5,
or_(and_(or_(t.c.x == 7))))),
"SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2"
)
self.assert_compile(
select([t]).where(and_(or_(t.c.x == 12,
and_(or_(t.c.x == 8))))),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2"
)
self.assert_compile(
select([t]).
where(
and_(
or_(
or_(t.c.x == 12),
and_(
or_(),
or_(and_(t.c.x == 8)),
and_()
)
)
)
),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2"
)
def test_true_short_circuit(self):
t = table('t', column('x'))
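        # without native boolean support, true() renders as the "1 = 1" idiom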
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE 1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE true",
dialect=default.DefaultDialect(supports_native_boolean=True)
)
self.assert_compile(
select([t]),
"SELECT t.x FROM t",
dialect=default.DefaultDialect(supports_native_boolean=True)
)
def test_distinct(self):
self.assert_compile(
select([table1.c.myid.distinct()]),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([distinct(table1.c.myid)]),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([table1.c.myid]).distinct(),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([func.count(table1.c.myid.distinct())]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
)
self.assert_compile(
select([func.count(distinct(table1.c.myid))]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
)
def test_where_empty(self):
self.assert_compile(
select([table1.c.myid]).where(and_()),
"SELECT mytable.myid FROM mytable"
)
self.assert_compile(
select([table1.c.myid]).where(or_()),
"SELECT mytable.myid FROM mytable"
)
def test_multiple_col_binds(self):
self.assert_compile(
select(
[literal_column("*")],
or_(
table1.c.myid == 12, table1.c.myid == 'asdf',
table1.c.myid == 'foo')
),
"SELECT * FROM mytable WHERE mytable.myid = :myid_1 "
"OR mytable.myid = :myid_2 OR mytable.myid = :myid_3"
)
def test_order_by_nulls(self):
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
table2.c.othername.desc().nullsfirst()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS FIRST"
)
self.assert_compile(
table2.select(order_by=[
table2.c.otherid, table2.c.othername.desc().nullslast()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS LAST"
)
self.assert_compile(
table2.select(order_by=[
table2.c.otherid.nullslast(),
table2.c.othername.desc().nullsfirst()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS LAST, "
"myothertable.othername DESC NULLS FIRST"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid.nullsfirst(),
table2.c.othername.desc()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid.nullsfirst(),
table2.c.othername.desc().nullslast()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC NULLS LAST"
)
def test_orderby_groupby(self):
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
asc(table2.c.othername)]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername ASC"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
table2.c.othername.desc()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC"
)
# generative order_by
self.assert_compile(
table2.select().order_by(table2.c.otherid).
order_by(table2.c.othername.desc()),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC"
)
self.assert_compile(
table2.select().order_by(table2.c.otherid).
order_by(table2.c.othername.desc()
).order_by(None),
"SELECT myothertable.otherid, myothertable.othername "
"FROM myothertable"
)
self.assert_compile(
select(
[table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername]),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername"
)
# generative group by
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)]).
group_by(table2.c.othername),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername"
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)]).
group_by(table2.c.othername).group_by(None),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable"
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername],
order_by=[table2.c.othername]),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable "
"GROUP BY myothertable.othername ORDER BY myothertable.othername"
)
def test_for_update(self):
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
        # nowait is not supported by the default dialect; falls back to plain FOR UPDATE
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
assert_raises_message(
exc.ArgumentError,
"Unknown for_update argument: 'unknown_mode'",
table1.select, table1.c.myid == 7, for_update='unknown_mode'
)
def test_alias(self):
        # test an alias for table1. column names stay the same,
# table name "changes" to "foo".
self.assert_compile(
select([table1.alias('foo')]),
"SELECT foo.myid, foo.name, foo.description FROM mytable AS foo")
for dialect in (oracle.dialect(),):
self.assert_compile(
select([table1.alias('foo')]),
"SELECT foo.myid, foo.name, foo.description FROM mytable foo",
dialect=dialect)
self.assert_compile(
select([table1.alias()]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1")
# create a select for a join of two tables. use_labels
# means the column names will have labels tablename_columnname,
# which become the column keys accessible off the Selectable object.
# also, only use one column from the second table and all columns
        # from the first table (table1).
q = select(
[table1, table2.c.otherid],
table1.c.myid == table2.c.otherid, use_labels=True
)
# make an alias of the "selectable". column names
# stay the same (i.e. the labels), table name "changes" to "t2view".
a = alias(q, 't2view')
# select from that alias, also using labels. two levels of labels
# should produce two underscores.
# also, reference the column "mytable_myid" off of the t2view alias.
self.assert_compile(
a.select(a.c.mytable_myid == 9, use_labels=True),
"SELECT t2view.mytable_myid AS t2view_mytable_myid, "
"t2view.mytable_name "
"AS t2view_mytable_name, "
"t2view.mytable_description AS t2view_mytable_description, "
"t2view.myothertable_otherid AS t2view_myothertable_otherid FROM "
"(SELECT mytable.myid AS mytable_myid, "
"mytable.name AS mytable_name, "
"mytable.description AS mytable_description, "
"myothertable.otherid AS "
"myothertable_otherid FROM mytable, myothertable "
"WHERE mytable.myid = "
"myothertable.otherid) AS t2view "
"WHERE t2view.mytable_myid = :mytable_myid_1"
)
def test_prefix(self):
self.assert_compile(
table1.select().prefix_with("SQL_CALC_FOUND_ROWS").
prefix_with("SQL_SOME_WEIRD_MYSQL_THING"),
"SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable"
)
def test_prefix_dialect_specific(self):
self.assert_compile(
table1.select().prefix_with("SQL_CALC_FOUND_ROWS",
dialect='sqlite').
prefix_with("SQL_SOME_WEIRD_MYSQL_THING",
dialect='mysql'),
"SELECT SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
dialect=mysql.dialect()
)
@testing.emits_warning('.*empty sequence.*')
def test_render_binds_as_literal(self):
"""test a compiler that renders binds inline into
SQL in the columns clause."""
dialect = default.DefaultDialect()
class Compiler(dialect.statement_compiler):
ansi_bind_rules = True
dialect.statement_compiler = Compiler
self.assert_compile(
select([literal("someliteral")]),
"SELECT 'someliteral' AS anon_1",
dialect=dialect
)
self.assert_compile(
select([table1.c.myid + 3]),
"SELECT mytable.myid + 3 AS anon_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([table1.c.myid.in_([4, 5, 6])]),
"SELECT mytable.myid IN (4, 5, 6) AS anon_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([func.mod(table1.c.myid, 5)]),
"SELECT mod(mytable.myid, 5) AS mod_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([literal("foo").in_([])]),
"SELECT 'foo' != 'foo' AS anon_1",
dialect=dialect
)
self.assert_compile(
select([literal(util.b("foo"))]),
"SELECT 'foo' AS anon_1",
dialect=dialect
)
# test callable
self.assert_compile(
select([table1.c.myid == bindparam("foo", callable_=lambda: 5)]),
"SELECT mytable.myid = 5 AS anon_1 FROM mytable",
dialect=dialect
)
assert_raises_message(
exc.CompileError,
"Bind parameter 'foo' without a "
"renderable value not allowed here.",
bindparam("foo").in_(
[]).compile,
dialect=dialect)
def test_literal(self):
self.assert_compile(select([literal('foo')]),
"SELECT :param_1 AS anon_1")
self.assert_compile(
select(
[
literal("foo") +
literal("bar")],
from_obj=[table1]),
"SELECT :param_1 || :param_2 AS anon_1 FROM mytable")
def test_calculated_columns(self):
value_tbl = table('values',
column('id', Integer),
column('val1', Float),
column('val2', Float),
)
self.assert_compile(
select([value_tbl.c.id, (value_tbl.c.val2 -
value_tbl.c.val1) / value_tbl.c.val1]),
"SELECT values.id, (values.val2 - values.val1) "
"/ values.val1 AS anon_1 FROM values"
)
self.assert_compile(
select([
value_tbl.c.id],
(value_tbl.c.val2 - value_tbl.c.val1) /
value_tbl.c.val1 > 2.0),
"SELECT values.id FROM values WHERE "
"(values.val2 - values.val1) / values.val1 > :param_1"
)
self.assert_compile(
select([value_tbl.c.id], value_tbl.c.val1 /
(value_tbl.c.val2 - value_tbl.c.val1) /
value_tbl.c.val1 > 2.0),
"SELECT values.id FROM values WHERE "
"(values.val1 / (values.val2 - values.val1)) "
"/ values.val1 > :param_1"
)
def test_percent_chars(self):
t = table("table%name",
column("percent%"),
column("%(oneofthese)s"),
column("spaces % more spaces"),
)
self.assert_compile(
t.select(use_labels=True),
'''SELECT "table%name"."percent%" AS "table%name_percent%", '''
'''"table%name"."%(oneofthese)s" AS '''
'''"table%name_%(oneofthese)s", '''
'''"table%name"."spaces % more spaces" AS '''
'''"table%name_spaces % '''
'''more spaces" FROM "table%name"'''
)
def test_joins(self):
self.assert_compile(
join(table2, table1, table1.c.myid == table2.c.otherid).select(),
"SELECT myothertable.otherid, myothertable.othername, "
"mytable.myid, mytable.name, mytable.description FROM "
"myothertable JOIN mytable ON mytable.myid = myothertable.otherid"
)
self.assert_compile(
select(
[table1],
from_obj=[join(table1, table2, table1.c.myid
== table2.c.otherid)]
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable JOIN myothertable ON mytable.myid = myothertable.otherid")
self.assert_compile(
select(
[join(join(table1, table2, table1.c.myid == table2.c.otherid),
table3, table1.c.myid == table3.c.userid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid, "
"thirdtable.otherstuff FROM mytable JOIN myothertable "
"ON mytable.myid ="
" myothertable.otherid JOIN thirdtable ON "
"mytable.myid = thirdtable.userid"
)
self.assert_compile(
join(users, addresses, users.c.user_id ==
addresses.c.user_id).select(),
"SELECT users.user_id, users.user_name, users.password, "
"addresses.address_id, addresses.user_id, addresses.street, "
"addresses.city, addresses.state, addresses.zip "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id"
)
self.assert_compile(
select([table1, table2, table3],
from_obj=[join(table1, table2,
table1.c.myid == table2.c.otherid).
outerjoin(table3,
table1.c.myid == table3.c.userid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable "
"JOIN myothertable ON mytable.myid "
"= myothertable.otherid LEFT OUTER JOIN thirdtable "
"ON mytable.myid ="
" thirdtable.userid"
)
self.assert_compile(
select([table1, table2, table3],
from_obj=[outerjoin(table1,
join(table2, table3, table2.c.otherid
== table3.c.userid),
table1.c.myid == table2.c.otherid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable LEFT OUTER JOIN "
"(myothertable "
"JOIN thirdtable ON myothertable.otherid = "
"thirdtable.userid) ON "
"mytable.myid = myothertable.otherid"
)
query = select(
[table1, table2],
or_(
table1.c.name == 'fred',
table1.c.myid == 10,
table2.c.othername != 'jack',
text("EXISTS (select yay from foo where boo = lar)")
),
from_obj=[outerjoin(table1, table2,
table1.c.myid == table2.c.otherid)]
)
self.assert_compile(
query, "SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername "
"FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = "
"myothertable.otherid WHERE mytable.name = :name_1 OR "
"mytable.myid = :myid_1 OR myothertable.othername != :othername_1 "
"OR EXISTS (select yay from foo where boo = lar)", )
def test_full_outer_join(self):
for spec in [
join(table1, table2, table1.c.myid == table2.c.otherid, full=True),
outerjoin(
table1, table2,
table1.c.myid == table2.c.otherid, full=True),
table1.join(
table2,
table1.c.myid == table2.c.otherid, full=True),
table1.outerjoin(
table2,
table1.c.myid == table2.c.otherid, full=True),
]:
stmt = select([table1]).select_from(spec)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable FULL OUTER JOIN myothertable "
"ON mytable.myid = myothertable.otherid")
def test_compound_selects(self):
assert_raises_message(
exc.ArgumentError,
"All selectables passed to CompoundSelect "
"must have identical numbers of columns; "
"select #1 has 2 columns, select #2 has 3",
union, table3.select(), table1.select()
)
x = union(
select([table1], table1.c.myid == 5),
select([table1], table1.c.myid == 12),
order_by=[table1.c.myid],
)
self.assert_compile(
x, "SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE "
"mytable.myid = :myid_1 UNION "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_2 "
"ORDER BY mytable.myid")
x = union(
select([table1]),
select([table1])
)
x = union(x, select([table1]))
self.assert_compile(
x, "(SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable) UNION SELECT mytable.myid,"
" mytable.name, mytable.description FROM mytable")
u1 = union(
select([table1.c.myid, table1.c.name]),
select([table2]),
select([table3])
)
self.assert_compile(
u1, "SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable "
"UNION SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable")
assert u1.corresponding_column(table2.c.otherid) is u1.c.myid
self.assert_compile(
union(
select([table1.c.myid, table1.c.name]),
select([table2]),
order_by=['myid'],
offset=10,
limit=5
),
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername "
"FROM myothertable ORDER BY myid " # note table name is omitted
"LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}
)
self.assert_compile(
union(
select([table1.c.myid, table1.c.name,
func.max(table1.c.description)],
table1.c.name == 'name2',
group_by=[table1.c.myid, table1.c.name]),
table1.select(table1.c.name == 'name1')
),
"SELECT mytable.myid, mytable.name, "
"max(mytable.description) AS max_1 "
"FROM mytable WHERE mytable.name = :name_1 "
"GROUP BY mytable.myid, "
"mytable.name UNION SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE mytable.name = :name_2"
)
self.assert_compile(
union(
select([literal(100).label('value')]),
select([literal(200).label('value')])
),
"SELECT :param_1 AS value UNION SELECT :param_2 AS value"
)
self.assert_compile(
union_all(
select([table1.c.myid]),
union(
select([table2.c.otherid]),
select([table3.c.userid]),
)
),
"SELECT mytable.myid FROM mytable UNION ALL "
"(SELECT myothertable.otherid FROM myothertable UNION "
"SELECT thirdtable.userid FROM thirdtable)"
)
s = select([column('foo'), column('bar')])
self.assert_compile(
union(
s.order_by("foo"),
s.order_by("bar")),
"(SELECT foo, bar ORDER BY foo) UNION "
"(SELECT foo, bar ORDER BY bar)")
self.assert_compile(
union(s.order_by("foo").self_group(),
s.order_by("bar").limit(10).self_group()),
"(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, "
"bar ORDER BY bar LIMIT :param_1)",
{'param_1': 10}
)
def test_compound_grouping(self):
s = select([column('foo'), column('bar')]).select_from(text('bat'))
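        # nested compound selects are parenthesized only where grouping is required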
self.assert_compile(
union(union(union(s, s), s), s),
"((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(s, s, s, s),
"SELECT foo, bar FROM bat UNION SELECT foo, bar "
"FROM bat UNION SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(s, union(s, union(s, s))),
"SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat))"
)
self.assert_compile(
select([s.alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat) AS anon_1'
)
self.assert_compile(
select([union(s, s).alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat UNION '
'SELECT foo, bar FROM bat) AS anon_1'
)
self.assert_compile(
select([except_(s, s).alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat EXCEPT '
'SELECT foo, bar FROM bat) AS anon_1'
)
# this query sqlite specifically chokes on
self.assert_compile(
union(
except_(s, s),
s
),
"(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(
s,
except_(s, s),
),
"SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)"
)
# this solves it
self.assert_compile(
union(
except_(s, s).alias().select(),
s
),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1 "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
except_(
union(s, s),
union(s, s)
),
"(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)"
)
s2 = union(s, s)
s3 = union(s2, s2)
self.assert_compile(s3, "(SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat)")
self.assert_compile(
union(
intersect(s, s),
intersect(s, s)
),
"(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat INTERSECT "
"SELECT foo, bar FROM bat)"
)
# tests for [ticket:2528]
# sqlite hates all of these.
self.assert_compile(
union(
s.limit(1),
s.offset(2)
),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_2)"
)
self.assert_compile(
union(
s.order_by(column('bar')),
s.offset(2)
),
"(SELECT foo, bar FROM bat ORDER BY bar) "
"UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_1)"
)
self.assert_compile(
union(
s.limit(1).alias('a'),
s.limit(2).alias('b')
),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2)"
)
self.assert_compile(
union(
s.limit(1).self_group(),
s.limit(2).self_group()
),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2)"
)
self.assert_compile(
union(s.limit(1), s.limit(2).offset(3)).alias().select(),
"SELECT anon_1.foo, anon_1.bar FROM "
"((SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2 OFFSET :param_3)) "
"AS anon_1"
)
# this version works for SQLite
self.assert_compile(
union(
s.limit(1).alias().select(),
s.offset(2).alias().select(),
),
"SELECT anon_1.foo, anon_1.bar "
"FROM (SELECT foo, bar FROM bat"
" LIMIT :param_1) AS anon_1 "
"UNION SELECT anon_2.foo, anon_2.bar "
"FROM (SELECT foo, bar "
"FROM bat"
" LIMIT -1 OFFSET :param_2) AS anon_2"
)
def test_binds(self):
for (
stmt,
expected_named_stmt,
expected_positional_stmt,
expected_default_params_dict,
expected_default_params_list,
test_param_dict,
expected_test_params_dict,
expected_test_params_list
) in [
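            # each tuple: statement, expected named SQL, expected positional SQL,
            # default params (dict and list), test params,
            # expected test params (dict and list)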
(
select(
[table1, table2],
and_(
table1.c.myid == table2.c.otherid,
table1.c.name == bindparam('mytablename')
)),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid "
"AND mytable.name = :mytablename",
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid AND "
"mytable.name = ?",
{'mytablename': None}, [None],
{'mytablename': 5}, {'mytablename': 5}, [5]
),
(
select([table1], or_(table1.c.myid == bindparam('myid'),
table2.c.otherid == bindparam('myid'))),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = :myid "
"OR myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{'myid': None}, [None, None],
{'myid': 5}, {'myid': 5}, [5, 5]
),
(
text("SELECT mytable.myid, mytable.name, "
"mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid"),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? OR "
"myothertable.otherid = ?",
{'myid': None}, [None, None],
{'myid': 5}, {'myid': 5}, [5, 5]
),
(
select([table1], or_(table1.c.myid ==
bindparam('myid', unique=True),
table2.c.otherid ==
bindparam('myid', unique=True))),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{'myid_1': None, 'myid_2': None}, [None, None],
{'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6]
),
(
bindparam('test', type_=String, required=False) + text("'hi'"),
":test || 'hi'",
"? || 'hi'",
{'test': None}, [None],
{}, {'test': None}, [None]
),
(
# testing select.params() here - bindparam() objects
# must get required flag set to False
select(
[table1],
or_(
table1.c.myid == bindparam('myid'),
table2.c.otherid == bindparam('myotherid')
)).params({'myid': 8, 'myotherid': 7}),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid OR myothertable.otherid = :myotherid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{'myid': 8, 'myotherid': 7}, [8, 7],
{'myid': 5}, {'myid': 5, 'myotherid': 7}, [5, 7]
),
(
select([table1], or_(table1.c.myid ==
bindparam('myid', value=7, unique=True),
table2.c.otherid ==
bindparam('myid', value=8, unique=True))),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{'myid_1': 7, 'myid_2': 8}, [7, 8],
{'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6]
),
]:
self.assert_compile(stmt, expected_named_stmt,
params=expected_default_params_dict)
self.assert_compile(stmt, expected_positional_stmt,
dialect=sqlite.dialect())
nonpositional = stmt.compile()
positional = stmt.compile(dialect=sqlite.dialect())
pp = positional.params
eq_([pp[k] for k in positional.positiontup],
expected_default_params_list)
eq_(nonpositional.construct_params(test_param_dict),
expected_test_params_dict)
pp = positional.construct_params(test_param_dict)
eq_(
[pp[k] for k in positional.positiontup],
expected_test_params_list
)
# check that params() doesn't modify original statement
s = select([table1], or_(table1.c.myid == bindparam('myid'),
table2.c.otherid ==
bindparam('myotherid')))
s2 = s.params({'myid': 8, 'myotherid': 7})
s3 = s2.params({'myid': 9})
assert s.compile().params == {'myid': None, 'myotherid': None}
assert s2.compile().params == {'myid': 8, 'myotherid': 7}
assert s3.compile().params == {'myid': 9, 'myotherid': 7}
# test using same 'unique' param object twice in one compile
s = select([table1.c.myid]).where(table1.c.myid == 12).as_scalar()
s2 = select([table1, s], table1.c.myid == s)
self.assert_compile(
s2, "SELECT mytable.myid, mytable.name, mytable.description, "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = "
":myid_1) AS anon_1 FROM mytable WHERE mytable.myid = "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)")
positional = s2.compile(dialect=sqlite.dialect())
pp = positional.params
assert [pp[k] for k in positional.positiontup] == [12, 12]
# check that conflicts with "unique" params are caught
s = select([table1], or_(table1.c.myid == 7,
table1.c.myid == bindparam('myid_1')))
assert_raises_message(exc.CompileError,
"conflicts with unique bind parameter "
"of the same name",
str, s)
s = select([table1], or_(table1.c.myid == 7, table1.c.myid == 8,
table1.c.myid == bindparam('myid_1')))
assert_raises_message(exc.CompileError,
"conflicts with unique bind parameter "
"of the same name",
str, s)
def _test_binds_no_hash_collision(self):
"""test that construct_params doesn't corrupt dict
due to hash collisions"""
total_params = 100000
in_clause = [':in%d' % i for i in range(total_params)]
params = dict(('in%d' % i, i) for i in range(total_params))
t = text('text clause %s' % ', '.join(in_clause))
eq_(len(t.bindparams), total_params)
c = t.compile()
pp = c.construct_params(params)
eq_(len(set(pp)), total_params, '%s %s' % (len(set(pp)), len(pp)))
eq_(len(set(pp.values())), total_params)
def test_bind_as_col(self):
t = table('foo', column('id'))
s = select([t, literal('lala').label('hoho')])
self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo")
assert [str(c) for c in s.c] == ["id", "hoho"]
def test_bind_callable(self):
expr = column('x') == bindparam("key", callable_=lambda: 12)
self.assert_compile(
expr,
"x = :key",
{'x': 12}
)
def test_bind_params_missing(self):
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(
[table1]).where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True)
)
).compile().construct_params,
params=dict(y=5)
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(
[table1]).where(
table1.c.myid == bindparam(
"x",
required=True)).compile().construct_params)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(
[table1]).where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True)
)
).compile().construct_params,
params=dict(y=5), _group_number=2)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(
[table1]).where(
table1.c.myid == bindparam(
"x",
required=True)).compile().construct_params,
_group_number=2)
def test_tuple(self):
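        # tuple_() IN against literal tuples, tuple expressions, and a subquery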
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[(1, 'foo'), (5, 'bar')]),
"(mytable.myid, mytable.name) IN "
"((:param_1, :param_2), (:param_3, :param_4))"
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[tuple_(table2.c.otherid, table2.c.othername)]
),
"(mytable.myid, mytable.name) IN "
"((myothertable.otherid, myothertable.othername))"
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
select([table2.c.otherid, table2.c.othername])
),
"(mytable.myid, mytable.name) IN (SELECT "
"myothertable.otherid, myothertable.othername FROM myothertable)"
)
def test_cast(self):
tbl = table('casttest',
column('id', Integer),
column('v1', Float),
column('v2', Float),
column('ts', TIMESTAMP),
)
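        # helper asserting CAST rendering for the given dialect: plain Numeric,
        # precision Numeric, Date, and literal int/str casts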
def check_results(dialect, expected_results, literal):
eq_(len(expected_results), 5,
'Incorrect number of expected results')
eq_(str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[0])
eq_(str(tbl.c.v1.cast(Numeric).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[0])
eq_(str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[1])
eq_(str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
'CAST(casttest.ts AS %s)' % expected_results[2])
eq_(str(cast(1234, Text).compile(dialect=dialect)),
'CAST(%s AS %s)' % (literal, expected_results[3]))
eq_(str(cast('test', String(20)).compile(dialect=dialect)),
'CAST(%s AS %s)' % (literal, expected_results[4]))
# fixme: shoving all of this dialect-specific stuff in one test
            # is now officially completely ridiculous AND non-obviously omits
# coverage on other dialects.
sel = select([tbl, cast(tbl.c.v1, Numeric)]).compile(
dialect=dialect)
if isinstance(dialect, type(mysql.dialect())):
eq_(str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, "
"CAST(casttest.v1 AS DECIMAL) AS anon_1 \nFROM casttest")
else:
eq_(str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
"anon_1 \nFROM casttest")
# first test with PostgreSQL engine
check_results(
postgresql.dialect(), [
'NUMERIC', 'NUMERIC(12, 9)', 'DATE', 'TEXT', 'VARCHAR(20)'],
'%(param_1)s')
# then the Oracle engine
check_results(
oracle.dialect(), [
'NUMERIC', 'NUMERIC(12, 9)', 'DATE',
'CLOB', 'VARCHAR2(20 CHAR)'],
':param_1')
# then the sqlite engine
check_results(sqlite.dialect(), ['NUMERIC', 'NUMERIC(12, 9)',
'DATE', 'TEXT', 'VARCHAR(20)'], '?')
# then the MySQL engine
check_results(mysql.dialect(), ['DECIMAL', 'DECIMAL(12, 9)',
'DATE', 'CHAR', 'CHAR(20)'], '%s')
self.assert_compile(cast(text('NULL'), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
self.assert_compile(cast(null(), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
self.assert_compile(cast(literal_column('NULL'), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
def test_over(self):
self.assert_compile(
func.row_number().over(),
"row_number() OVER ()"
)
self.assert_compile(
func.row_number().over(
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name],
order_by=[table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=table1.c.description
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[],
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description],
order_by=[]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[],
order_by=[]
),
"row_number() OVER ()"
)
self.assert_compile(
select([func.row_number().over(
order_by=table1.c.description
).label('foo')]),
"SELECT row_number() OVER (ORDER BY mytable.description) "
"AS foo FROM mytable"
)
# test from_obj generation.
# from func:
self.assert_compile(
select([
func.max(table1.c.name).over(
partition_by=['description']
)
]),
"SELECT max(mytable.name) OVER (PARTITION BY mytable.description) "
"AS anon_1 FROM mytable"
)
# from partition_by
self.assert_compile(
select([
func.row_number().over(
partition_by=[table1.c.name]
)
]),
"SELECT row_number() OVER (PARTITION BY mytable.name) "
"AS anon_1 FROM mytable"
)
# from order_by
self.assert_compile(
select([
func.row_number().over(
order_by=table1.c.name
)
]),
"SELECT row_number() OVER (ORDER BY mytable.name) "
"AS anon_1 FROM mytable"
)
# this tests that _from_objects
        # concatenates OK
self.assert_compile(
select([column("x") + over(func.foo())]),
"SELECT x + foo() OVER () AS anon_1"
)
        # test a reference to a label that is in the referenced selectable;
# this resolves
expr = (table1.c.myid + 5).label('sum')
stmt = select([expr]).alias()
self.assert_compile(
select([stmt.c.sum, func.row_number().over(order_by=stmt.c.sum)]),
"SELECT anon_1.sum, row_number() OVER (ORDER BY anon_1.sum) "
"AS anon_2 FROM (SELECT mytable.myid + :myid_1 AS sum "
"FROM mytable) AS anon_1"
)
# test a reference to a label that's at the same level as the OVER
# in the columns clause; doesn't resolve
expr = (table1.c.myid + 5).label('sum')
self.assert_compile(
select([expr, func.row_number().over(order_by=expr)]),
"SELECT mytable.myid + :myid_1 AS sum, "
"row_number() OVER "
"(ORDER BY mytable.myid + :myid_1) AS anon_1 FROM mytable"
)
def test_date_between(self):
import datetime
table = Table('dt', metadata,
Column('date', Date))
self.assert_compile(
table.select(table.c.date.between(datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5))),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={'date_1': datetime.date(2006, 6, 1),
'date_2': datetime.date(2006, 6, 5)})
self.assert_compile(
table.select(sql.between(table.c.date, datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5))),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={'date_1': datetime.date(2006, 6, 1),
'date_2': datetime.date(2006, 6, 5)})
def test_delayed_col_naming(self):
my_str = Column(String)
sel1 = select([my_str])
assert_raises_message(
exc.InvalidRequestError,
"Cannot initialize a sub-selectable with this Column",
lambda: sel1.c
)
# calling label or as_scalar doesn't compile
# anything.
sel2 = select([func.substr(my_str, 2, 3)]).label('my_substr')
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
str, sel2
)
sel3 = select([my_str]).as_scalar()
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
str, sel3
)
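        # once the Column is named, all three statements compile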
my_str.name = 'foo'
self.assert_compile(
sel1,
"SELECT foo",
)
self.assert_compile(
sel2,
'(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)',
)
self.assert_compile(
sel3,
"(SELECT foo)"
)
def test_naming(self):
        # TODO: the parts where we check c.keys() are not "compile" tests;
        # they probably belong in test_selectable, or some broken-up
        # version of that suite
f1 = func.hoho(table1.c.name)
s1 = select([table1.c.myid, table1.c.myid.label('foobar'),
f1,
func.lala(table1.c.name).label('gg')])
eq_(
list(s1.c.keys()),
['myid', 'foobar', str(f1), 'gg']
)
meta = MetaData()
t1 = Table('mytable', meta, Column('col1', Integer))
exprs = (
table1.c.myid == 12,
func.hoho(table1.c.myid),
cast(table1.c.name, Numeric),
literal('x'),
)
for col, key, expr, lbl in (
(table1.c.name, 'name', 'mytable.name', None),
(exprs[0], str(exprs[0]), 'mytable.myid = :myid_1', 'anon_1'),
(exprs[1], str(exprs[1]), 'hoho(mytable.myid)', 'hoho_1'),
(exprs[2], str(exprs[2]),
'CAST(mytable.name AS NUMERIC)', 'anon_1'),
(t1.c.col1, 'col1', 'mytable.col1', None),
(column('some wacky thing'), 'some wacky thing',
'"some wacky thing"', ''),
(exprs[3], exprs[3].key, ":param_1", "anon_1")
):
if getattr(col, 'table', None) is not None:
t = col.table
else:
t = table1
s1 = select([col], from_obj=t)
assert list(s1.c.keys()) == [key], list(s1.c.keys())
if lbl:
self.assert_compile(
s1, "SELECT %s AS %s FROM mytable" %
(expr, lbl))
else:
self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,))
s1 = select([s1])
if lbl:
self.assert_compile(
s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" %
(lbl, expr, lbl))
elif col.table is not None:
# sqlite rule labels subquery columns
self.assert_compile(
s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" %
(key, expr, key))
else:
self.assert_compile(s1,
"SELECT %s FROM (SELECT %s FROM mytable)" %
(expr, expr))
def test_hints(self):
s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s")
s2 = select([table1.c.myid]).\
with_hint(table1, "index(%(name)s idx)", 'oracle').\
with_hint(table1, "WITH HINT INDEX idx", 'sybase')
a1 = table1.alias()
s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)")
subs4 = select([
table1, table2
]).select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)).\
with_hint(table1, 'hint1')
s4 = select([table3]).select_from(
table3.join(
subs4,
subs4.c.othername == table3.c.otherstuff
)
).\
with_hint(table3, 'hint3')
t1 = table('QuotedName', column('col1'))
s6 = select([t1.c.col1]).where(t1.c.col1 > 10).\
with_hint(t1, '%(name)s idx1')
a2 = t1.alias('SomeName')
s7 = select([a2.c.col1]).where(a2.c.col1 > 10).\
with_hint(a2, '%(name)s idx1')
mysql_d, oracle_d, sybase_d = \
mysql.dialect(), \
oracle.dialect(), \
sybase.dialect()
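        # hint placement differs per dialect: rendered inline for mysql/sybase,
        # as /*+ ... */ comments for oracle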
for stmt, dialect, expected in [
(s, mysql_d,
"SELECT mytable.myid FROM mytable test hint mytable"),
(s, oracle_d,
"SELECT /*+ test hint mytable */ mytable.myid FROM mytable"),
(s, sybase_d,
"SELECT mytable.myid FROM mytable test hint mytable"),
(s2, mysql_d,
"SELECT mytable.myid FROM mytable"),
(s2, oracle_d,
"SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable"),
(s2, sybase_d,
"SELECT mytable.myid FROM mytable WITH HINT INDEX idx"),
(s3, mysql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)"),
(s3, oracle_d,
"SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM "
"mytable mytable_1"),
(s3, sybase_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)"),
(s4, mysql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 INNER JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 INNER "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff"),
(s4, sybase_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff"),
(s4, oracle_d,
"SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid,"
" mytable.name, mytable.description, myothertable.otherid,"
" myothertable.othername FROM mytable JOIN myothertable ON"
" mytable.myid = myothertable.otherid) ON othername ="
" thirdtable.otherstuff"),
# TODO: figure out dictionary ordering solution here
# (s5, oracle_d,
# "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, "
# "thirdtable.otherstuff "
# "FROM thirdtable JOIN (SELECT mytable.myid,"
# " mytable.name, mytable.description, myothertable.otherid,"
# " myothertable.othername FROM mytable JOIN myothertable ON"
# " mytable.myid = myothertable.otherid) ON othername ="
# " thirdtable.otherstuff"),
(s6, oracle_d,
"""SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """
"""FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1"""),
(s7, oracle_d,
"""SELECT /*+ "SomeName" idx1 */ "SomeName".col1 FROM """
""""QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1"""),
]:
self.assert_compile(
stmt,
expected,
dialect=dialect
)
def test_statement_hints(self):
stmt = select([table1.c.myid]).\
with_statement_hint("test hint one").\
with_statement_hint("test hint two", 'mysql')
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable test hint one",
)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable test hint one test hint two",
dialect='mysql'
)
def test_literal_as_text_fromstring(self):
self.assert_compile(
and_(text("a"), text("b")),
"a AND b"
)
def test_literal_as_text_nonstring_raise(self):
assert_raises(exc.ArgumentError,
and_, ("a",), ("b",)
)
class UnsupportedTest(fixtures.TestBase):
def test_unsupported_element_str_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
__visit_name__ = 'some_element'
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile
)
def test_unsupported_element_meth_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
@classmethod
def __visit_name__(cls):
return "some_element"
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile
)
def test_unsupported_operator(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
binary = BinaryExpression(column("foo"), column("bar"), myop)
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*"
r"can't render element of type <function.*",
binary.compile
)
class StringifySpecialTest(fixtures.TestBase):
def test_basic(self):
stmt = select([table1]).where(table1.c.myid == 10)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1"
)
def test_cte(self):
# stringify of these was supported anyway by defaultdialect.
stmt = select([table1.c.myid]).cte()
stmt = select([stmt])
eq_ignore_whitespace(
str(stmt),
"WITH anon_1 AS (SELECT mytable.myid AS myid FROM mytable) "
"SELECT anon_1.myid FROM anon_1"
)
def test_returning(self):
stmt = table1.insert().returning(table1.c.myid)
eq_ignore_whitespace(
str(stmt),
"INSERT INTO mytable (myid, name, description) "
"VALUES (:myid, :name, :description) RETURNING mytable.myid"
)
def test_array_index(self):
stmt = select([column('foo', types.ARRAY(Integer))[5]])
eq_ignore_whitespace(
str(stmt),
"SELECT foo[:foo_1] AS anon_1"
)
def test_unknown_type(self):
class MyType(types.TypeEngine):
__visit_name__ = 'mytype'
stmt = select([cast(table1.c.myid, MyType)])
eq_ignore_whitespace(
str(stmt),
"SELECT CAST(mytable.myid AS MyType) AS anon_1 FROM mytable"
)
def test_within_group(self):
# stringify of these was supported anyway by defaultdialect.
from sqlalchemy import within_group
stmt = select([
table1.c.myid,
within_group(
func.percentile_cont(0.5),
table1.c.name.desc()
)
])
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
"WITHIN GROUP (ORDER BY mytable.name DESC) AS anon_1 FROM mytable"
)
class KwargPropagationTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
from sqlalchemy.sql.expression import ColumnClause, TableClause
class CatchCol(ColumnClause):
pass
class CatchTable(TableClause):
pass
cls.column = CatchCol("x")
cls.table = CatchTable("y")
cls.criterion = cls.column == CatchCol('y')
@compiles(CatchCol)
def compile_col(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_column(element)
@compiles(CatchTable)
def compile_table(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_table(element)
def _do_test(self, element):
d = default.DefaultDialect()
d.statement_compiler(d, element,
compile_kwargs={"canary": True})
def test_binary(self):
self._do_test(self.column == 5)
def test_select(self):
s = select([self.column]).select_from(self.table).\
where(self.column == self.criterion).\
order_by(self.column)
self._do_test(s)
def test_case(self):
c = case([(self.criterion, self.column)], else_=self.column)
self._do_test(c)
def test_cast(self):
c = cast(self.column, Integer)
self._do_test(c)
class CRUDTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_insert_literal_binds(self):
stmt = table1.insert().values(myid=3, name='jack')
self.assert_compile(
stmt,
"INSERT INTO mytable (myid, name) VALUES (3, 'jack')",
literal_binds=True)
def test_update_literal_binds(self):
stmt = table1.update().values(name='jack').\
where(table1.c.name == 'jill')
self.assert_compile(
stmt,
"UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
literal_binds=True)
def test_delete_literal_binds(self):
stmt = table1.delete().where(table1.c.name == 'jill')
self.assert_compile(
stmt,
"DELETE FROM mytable WHERE mytable.name = 'jill'",
literal_binds=True)
def test_correlated_update(self):
# test against a straight text subquery
u = update(
table1,
values={
table1.c.name:
text("(select name from mytable where id=mytable.id)")
}
)
self.assert_compile(
u,
"UPDATE mytable SET name=(select name from mytable "
"where id=mytable.id)")
mt = table1.alias()
u = update(table1, values={
table1.c.name:
select([mt.c.name], mt.c.myid == table1.c.myid)
})
self.assert_compile(
u, "UPDATE mytable SET name=(SELECT mytable_1.name FROM "
"mytable AS mytable_1 WHERE "
"mytable_1.myid = mytable.myid)")
# test against a regular constructed subquery
s = select([table2], table2.c.otherid == table1.c.myid)
u = update(table1, table1.c.name == 'jack', values={table1.c.name: s})
self.assert_compile(
u, "UPDATE mytable SET name=(SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable WHERE "
"myothertable.otherid = mytable.myid) "
"WHERE mytable.name = :name_1")
# test a non-correlated WHERE clause
s = select([table2.c.othername], table2.c.otherid == 7)
u = update(table1, table1.c.name == s)
self.assert_compile(u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)")
# test one that is actually correlated...
s = select([table2.c.othername], table2.c.otherid == table1.c.myid)
u = table1.update(table1.c.name == s)
self.assert_compile(u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = mytable.myid)")
# test correlated FROM implicit in WHERE and SET clauses
u = table1.update().values(name=table2.c.othername)\
.where(table2.c.otherid == table1.c.myid)
self.assert_compile(
u, "UPDATE mytable SET name=myothertable.othername "
"FROM myothertable WHERE myothertable.otherid = mytable.myid")
u = table1.update().values(name='foo')\
.where(table2.c.otherid == table1.c.myid)
self.assert_compile(
u, "UPDATE mytable SET name=:name "
"FROM myothertable WHERE myothertable.otherid = mytable.myid")
self.assert_compile(u,
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable WHERE "
"myothertable.otherid = mytable.myid",
dialect=mssql.dialect())
self.assert_compile(u.where(table2.c.othername == mt.c.name),
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable, mytable AS mytable_1 "
"WHERE myothertable.otherid = mytable.myid "
"AND myothertable.othername = mytable_1.name",
dialect=mssql.dialect())
def test_binds_that_match_columns(self):
"""test bind params named after column names
replace the normal SET/VALUES generation."""
t = table('foo', column('x'), column('y'))
u = t.update().where(t.c.x == bindparam('x'))
assert_raises(exc.CompileError, u.compile)
self.assert_compile(u, "UPDATE foo SET WHERE foo.x = :x", params={})
assert_raises(exc.CompileError, u.values(x=7).compile)
self.assert_compile(u.values(y=7),
"UPDATE foo SET y=:y WHERE foo.x = :x")
assert_raises(exc.CompileError,
u.values(x=7).compile, column_keys=['x', 'y'])
assert_raises(exc.CompileError, u.compile, column_keys=['x', 'y'])
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x")
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
params={
'x': 1})
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x), y=:y WHERE foo.x = :x",
params={
'x': 1,
'y': 2})
i = t.insert().values(x=3 + bindparam('x'))
self.assert_compile(i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x))")
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x), :y)",
params={
'x': 1,
'y': 2})
i = t.insert().values(x=bindparam('y'))
self.assert_compile(i, "INSERT INTO foo (x) VALUES (:y)")
i = t.insert().values(x=bindparam('y'), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam('y'), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x2))")
self.assert_compile(
i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x2))",
params={})
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={
'x': 1,
'y': 2})
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={
'x2': 1,
'y': 2})
def test_labels_no_collision(self):
t = table('foo', column('id'), column('foo_id'))
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1"
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1"
)
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def _illegal_type_fixture(self):
class MyType(types.TypeEngine):
pass
@compiles(MyType)
def compile(element, compiler, **kw):
raise exc.CompileError("Couldn't compile type")
return MyType
def test_reraise_of_column_spec_issue(self):
MyType = self._illegal_type_fixture()
t1 = Table('t', MetaData(),
Column('x', MyType())
)
assert_raises_message(
exc.CompileError,
r"\(in table 't', column 'x'\): Couldn't compile type",
schema.CreateTable(t1).compile
)
def test_reraise_of_column_spec_issue_unicode(self):
MyType = self._illegal_type_fixture()
t1 = Table('t', MetaData(),
Column(u('méil'), MyType())
)
assert_raises_message(
exc.CompileError,
u(r"\(in table 't', column 'méil'\): Couldn't compile type"),
schema.CreateTable(t1).compile
)
def test_system_flag(self):
m = MetaData()
t = Table('t', m, Column('x', Integer),
Column('y', Integer, system=True),
Column('z', Integer))
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
m2 = MetaData()
t2 = t.tometadata(m2)
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
def test_composite_pk_constraint_autoinc_first(self):
m = MetaData()
t = Table(
't', m,
Column('a', Integer, primary_key=True),
Column('b', Integer, primary_key=True, autoincrement=True)
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t ("
"a INTEGER NOT NULL, "
"b INTEGER NOT NULL, "
"PRIMARY KEY (b, a))"
)
def test_table_no_cols(self):
m = MetaData()
t1 = Table('t1', m)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE t1 ()"
)
def test_table_no_cols_w_constraint(self):
m = MetaData()
t1 = Table('t1', m, CheckConstraint('a = 1'))
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE t1 (CHECK (a = 1))"
)
def test_table_one_col_w_constraint(self):
m = MetaData()
t1 = Table('t1', m, Column('q', Integer), CheckConstraint('a = 1'))
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE t1 (q INTEGER, CHECK (a = 1))"
)
def test_schema_translate_map_table(self):
m = MetaData()
t1 = Table('t1', m, Column('q', Integer))
t2 = Table('t2', m, Column('q', Integer), schema='foo')
t3 = Table('t3', m, Column('q', Integer), schema='bar')
schema_translate_map = {None: "z", "bar": None, "foo": "bat"}
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE z.t1 (q INTEGER)",
schema_translate_map=schema_translate_map
)
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE bat.t2 (q INTEGER)",
schema_translate_map=schema_translate_map
)
self.assert_compile(
schema.CreateTable(t3),
"CREATE TABLE t3 (q INTEGER)",
schema_translate_map=schema_translate_map
)
def test_schema_translate_map_sequence(self):
s1 = schema.Sequence('s1')
s2 = schema.Sequence('s2', schema='foo')
s3 = schema.Sequence('s3', schema='bar')
schema_translate_map = {None: "z", "bar": None, "foo": "bat"}
self.assert_compile(
schema.CreateSequence(s1),
"CREATE SEQUENCE z.s1",
schema_translate_map=schema_translate_map
)
self.assert_compile(
schema.CreateSequence(s2),
"CREATE SEQUENCE bat.s2",
schema_translate_map=schema_translate_map
)
self.assert_compile(
schema.CreateSequence(s3),
"CREATE SEQUENCE s3",
schema_translate_map=schema_translate_map
)
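    # Reading the two schema_translate_map tests above: with the map
    # {None: "z", "bar": None, "foo": "bat"}, objects defined without a schema
    # are rendered under schema "z", schema "bar" is stripped, and schema "foo"
    # is rewritten to "bat" -- for tables and sequences alike.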
class InlineDefaultTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_insert(self):
m = MetaData()
foo = Table('foo', m,
Column('id', Integer))
t = Table('test', m,
Column('col1', Integer, default=func.foo(1)),
Column('col2', Integer, default=select(
[func.coalesce(func.max(foo.c.id))])),
)
self.assert_compile(
t.insert(
inline=True, values={}),
"INSERT INTO test (col1, col2) VALUES (foo(:foo_1), "
"(SELECT coalesce(max(foo.id)) AS coalesce_1 FROM "
"foo))")
def test_update(self):
m = MetaData()
foo = Table('foo', m,
Column('id', Integer))
t = Table('test', m,
Column('col1', Integer, onupdate=func.foo(1)),
Column('col2', Integer, onupdate=select(
[func.coalesce(func.max(foo.c.id))])),
Column('col3', String(30))
)
self.assert_compile(t.update(inline=True, values={'col3': 'foo'}),
"UPDATE test SET col1=foo(:foo_1), col2=(SELECT "
"coalesce(max(foo.id)) AS coalesce_1 FROM foo), "
"col3=:col3")
class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_select(self):
self.assert_compile(table4.select(),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable")
self.assert_compile(
table4.select(
and_(
table4.c.datatype_id == 7,
table4.c.value == 'hi')),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND"
" remote_owner.remotetable.value = :value_1")
s = table4.select(and_(table4.c.datatype_id == 7,
table4.c.value == 'hi'), use_labels=True)
self.assert_compile(
s, "SELECT remote_owner.remotetable.rem_id AS"
" remote_owner_remotetable_rem_id, "
"remote_owner.remotetable.datatype_id AS"
" remote_owner_remotetable_datatype_id, "
"remote_owner.remotetable.value "
"AS remote_owner_remotetable_value FROM "
"remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND "
"remote_owner.remotetable.value = :value_1")
# multi-part schema name
self.assert_compile(table5.select(),
'SELECT "dbo.remote_owner".remotetable.rem_id, '
'"dbo.remote_owner".remotetable.datatype_id, '
'"dbo.remote_owner".remotetable.value '
'FROM "dbo.remote_owner".remotetable'
)
# multi-part schema name labels - convert '.' to '_'
self.assert_compile(table5.select(use_labels=True),
'SELECT "dbo.remote_owner".remotetable.rem_id AS'
' dbo_remote_owner_remotetable_rem_id, '
'"dbo.remote_owner".remotetable.datatype_id'
' AS dbo_remote_owner_remotetable_datatype_id,'
' "dbo.remote_owner".remotetable.value AS '
'dbo_remote_owner_remotetable_value FROM'
' "dbo.remote_owner".remotetable'
)
def test_schema_translate_select(self):
schema_translate_map = {"remote_owner": "foob", None: 'bar'}
self.assert_compile(
table1.select().where(table1.c.name == 'hi'),
"SELECT bar.mytable.myid, bar.mytable.name, "
"bar.mytable.description FROM bar.mytable "
"WHERE bar.mytable.name = :name_1",
schema_translate_map=schema_translate_map
)
self.assert_compile(
table4.select().where(table4.c.value == 'hi'),
"SELECT foob.remotetable.rem_id, foob.remotetable.datatype_id, "
"foob.remotetable.value FROM foob.remotetable "
"WHERE foob.remotetable.value = :value_1",
schema_translate_map=schema_translate_map
)
schema_translate_map = {"remote_owner": "foob"}
self.assert_compile(
select([
table1, table4
]).select_from(
join(table1, table4, table1.c.myid == table4.c.rem_id)
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"foob.remotetable.rem_id, foob.remotetable.datatype_id, "
"foob.remotetable.value FROM mytable JOIN foob.remotetable "
"ON foob.remotetable.rem_id = mytable.myid",
schema_translate_map=schema_translate_map
)
def test_schema_translate_crud(self):
schema_translate_map = {"remote_owner": "foob", None: 'bar'}
self.assert_compile(
table1.insert().values(description='foo'),
"INSERT INTO bar.mytable (description) VALUES (:description)",
schema_translate_map=schema_translate_map
)
self.assert_compile(
table1.update().where(table1.c.name == 'hi').
values(description='foo'),
"UPDATE bar.mytable SET description=:description "
"WHERE bar.mytable.name = :name_1",
schema_translate_map=schema_translate_map
)
self.assert_compile(
table1.delete().where(table1.c.name == 'hi'),
"DELETE FROM bar.mytable WHERE bar.mytable.name = :name_1",
schema_translate_map=schema_translate_map
)
self.assert_compile(
table4.insert().values(value='there'),
"INSERT INTO foob.remotetable (value) VALUES (:value)",
schema_translate_map=schema_translate_map
)
self.assert_compile(
table4.update().where(table4.c.value == 'hi').
values(value='there'),
"UPDATE foob.remotetable SET value=:value "
"WHERE foob.remotetable.value = :value_1",
schema_translate_map=schema_translate_map
)
self.assert_compile(
table4.delete().where(table4.c.value == 'hi'),
"DELETE FROM foob.remotetable WHERE "
"foob.remotetable.value = :value_1",
schema_translate_map=schema_translate_map
)
def test_alias(self):
a = alias(table4, 'remtable')
self.assert_compile(a.select(a.c.datatype_id == 7),
"SELECT remtable.rem_id, remtable.datatype_id, "
"remtable.value FROM"
" remote_owner.remotetable AS remtable "
"WHERE remtable.datatype_id = :datatype_id_1")
def test_update(self):
self.assert_compile(
table4.update(table4.c.value == 'test',
values={table4.c.datatype_id: 12}),
"UPDATE remote_owner.remotetable SET datatype_id=:datatype_id "
"WHERE remote_owner.remotetable.value = :value_1")
def test_insert(self):
self.assert_compile(table4.insert(values=(2, 5, 'test')),
"INSERT INTO remote_owner.remotetable "
"(rem_id, datatype_id, value) VALUES "
"(:rem_id, :datatype_id, :value)")
class CorrelateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_dont_overcorrelate(self):
self.assert_compile(select([table1], from_obj=[table1,
table1.select()]),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable, (SELECT "
"mytable.myid AS myid, mytable.name AS "
"name, mytable.description AS description "
"FROM mytable)")
def _fixture(self):
t1 = table('t1', column('a'))
t2 = table('t2', column('a'))
return t1, t2, select([t1]).where(t1.c.a == t2.c.a)
def _assert_where_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
def _assert_where_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a FROM t1, t2 WHERE t2.a = "
"(SELECT t1.a WHERE t1.a = t2.a)")
# note there's no more "backwards" correlation after
# we've done #2746
# def _assert_where_backwards_correlated(self, stmt):
# self.assert_compile(
# stmt,
# "SELECT t2.a FROM t2 WHERE t2.a = "
# "(SELECT t1.a FROM t2 WHERE t1.a = t2.a)")
# def _assert_column_backwards_correlated(self, stmt):
# self.assert_compile(stmt,
# "SELECT t2.a, (SELECT t1.a FROM t2 WHERE t1.a = t2.a) "
# "AS anon_1 FROM t2")
def _assert_column_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, (SELECT t1.a FROM t1 WHERE t1.a = t2.a) "
"AS anon_1 FROM t2")
def _assert_column_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, "
"(SELECT t1.a WHERE t1.a = t2.a) AS anon_1 FROM t1, t2")
def _assert_having_correlated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
def _assert_from_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, anon_1.a FROM t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
def _assert_from_all_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, anon_1.a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
def _assert_where_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
def _assert_column_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a, (SELECT t1.a FROM t1, t2 "
"WHERE t1.a = t2.a) AS anon_1 FROM t2")
def _assert_having_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
def _assert_where_single_full_correlated(self, stmt):
self.assert_compile(stmt,
"SELECT t1.a FROM t1 WHERE t1.a = (SELECT t1.a)")
def test_correlate_semiauto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate(t2)))
def test_correlate_semiauto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate(t2).as_scalar()]))
def test_correlate_semiauto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(t2).alias()]))
def test_correlate_semiauto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate(t2)))
def test_correlate_except_inclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate_except(t1)))
def test_correlate_except_exclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate_except(t2)))
def test_correlate_except_inclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate_except(t1).as_scalar()]))
def test_correlate_except_exclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate_except(t2).as_scalar()]))
def test_correlate_except_inclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t1).alias()]))
def test_correlate_except_exclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t2).alias()]))
def test_correlate_except_none(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate_except(None)))
def test_correlate_except_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate_except(t1)))
def test_correlate_auto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1))
def test_correlate_auto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.as_scalar()]))
def test_correlate_auto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.alias()]))
def test_correlate_auto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1))
def test_correlate_disabled_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate(None)))
def test_correlate_disabled_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate(None).as_scalar()]))
def test_correlate_disabled_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(None).alias()]))
def test_correlate_disabled_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_uncorrelated(
select([t2]).having(t2.c.a == s1.correlate(None)))
def test_correlate_all_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate(t1, t2)))
def test_correlate_all_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_all_correlated(
select([t1, t2, s1.correlate(t1, t2).as_scalar()]))
def test_correlate_all_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_all_uncorrelated(
select([t1, t2, s1.correlate(t1, t2).alias()]))
def test_correlate_where_all_unintentional(self):
t1, t2, s1 = self._fixture()
assert_raises_message(
exc.InvalidRequestError,
"returned no FROM clauses due to auto-correlation",
select([t1, t2]).where(t2.c.a == s1).compile
)
def test_correlate_from_all_ok(self):
t1, t2, s1 = self._fixture()
self.assert_compile(
select([t1, t2, s1]),
"SELECT t1.a, t2.a, a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a)"
)
def test_correlate_auto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s)
self.assert_compile(s2,
"SELECT t1.a FROM t1 WHERE t1.a = "
"(SELECT t1.a FROM t1)")
def test_correlate_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate(t1))
self._assert_where_single_full_correlated(s2)
def test_correlate_except_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate_except(t2))
self._assert_where_single_full_correlated(s2)
def test_correlate_alone_noeffect(self):
# new as of #2668
t1, t2, s1 = self._fixture()
self.assert_compile(s1.correlate(t1, t2),
"SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a")
def test_correlate_except_froms(self):
# new as of #2748
t1 = table('t1', column('a'))
t2 = table('t2', column('a'), column('b'))
s = select([t2.c.b]).where(t1.c.a == t2.c.a)
s = s.correlate_except(t2).alias('s')
s2 = select([func.foo(s.c.b)]).as_scalar()
s3 = select([t1], order_by=s2)
self.assert_compile(
s3, "SELECT t1.a FROM t1 ORDER BY "
"(SELECT foo(s.b) AS foo_1 FROM "
"(SELECT t2.b AS b FROM t2 WHERE t1.a = t2.a) AS s)")
def test_multilevel_froms_correlation(self):
# new as of #2748
p = table('parent', column('id'))
c = table('child', column('id'), column('parent_id'), column('pos'))
s = c.select().where(
c.c.parent_id == p.c.id).order_by(
c.c.pos).limit(1)
s = s.correlate(p)
s = exists().select_from(s).where(s.c.id == 1)
s = select([p]).where(s)
self.assert_compile(
s, "SELECT parent.id FROM parent WHERE EXISTS (SELECT * "
"FROM (SELECT child.id AS id, child.parent_id AS parent_id, "
"child.pos AS pos FROM child WHERE child.parent_id = parent.id "
"ORDER BY child.pos LIMIT :param_1) WHERE id = :id_1)")
def test_no_contextless_correlate_except(self):
# new as of #2748
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1]).where(t1.c.x == t2.c.y).\
where(t2.c.y == t3.c.z).correlate_except(t1)
self.assert_compile(
s,
"SELECT t1.x FROM t1, t2, t3 WHERE t1.x = t2.y AND t2.y = t3.z")
def test_multilevel_implicit_correlation_disabled(self):
# test that implicit correlation with multilevel WHERE correlation
# behaves like 0.8.1, 0.7 (i.e. doesn't happen)
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t3.c.z]).where(t3.c.z == s.as_scalar())
s3 = select([t1]).where(t1.c.x == s2.as_scalar())
self.assert_compile(s3,
"SELECT t1.x FROM t1 "
"WHERE t1.x = (SELECT t3.z "
"FROM t3 "
"WHERE t3.z = (SELECT t1.x "
"FROM t1, t2 "
"WHERE t1.x = t2.y))"
)
def test_from_implicit_correlation_disabled(self):
# test that implicit correlation with immediate and
# multilevel FROM clauses behaves like 0.8.1 (i.e. doesn't happen)
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t2, s])
s3 = select([t1, s2])
self.assert_compile(s3,
"SELECT t1.x, y, x FROM t1, "
"(SELECT t2.y AS y, x FROM t2, "
"(SELECT t1.x AS x FROM t1, t2 WHERE t1.x = t2.y))"
)
class CoercionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def _fixture(self):
m = MetaData()
return Table('foo', m,
Column('id', Integer))
bool_table = table('t', column('x', Boolean))
def test_coerce_bool_where(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x"
)
def test_coerce_bool_where_non_native(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 1",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
self.assert_compile(
select([self.bool_table]).where(~self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 0",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
def test_null_constant(self):
self.assert_compile(_literal_as_text(None), "NULL")
def test_false_constant(self):
self.assert_compile(_literal_as_text(False), "false")
def test_true_constant(self):
self.assert_compile(_literal_as_text(True), "true")
def test_val_and_false(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, False),
"false")
def test_val_and_true_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, True),
"foo.id = :id_1")
def test_val_is_null_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == None),
"foo.id IS NULL")
def test_val_and_None(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, None),
"foo.id = :id_1 AND NULL")
def test_None_and_val(self):
t = self._fixture()
self.assert_compile(and_(None, t.c.id == 1),
"NULL AND foo.id = :id_1")
def test_None_and_nothing(self):
        # current convention is that None in and_()
        # returns None. May want
        # to revise this at some point.
self.assert_compile(
and_(None), "NULL")
def test_val_and_null(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, null()),
"foo.id = :id_1 AND NULL")
class ResultMapTest(fixtures.TestBase):
"""test the behavior of the 'entry stack' and the determination
when the result_map needs to be populated.
"""
def test_compound_populates(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
stmt = select([t]).union(select([t]))
comp = stmt.compile()
eq_(
comp._create_result_map(),
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'b': ('b', (t.c.b, 'b', 'b'), t.c.b.type)}
)
def test_compound_not_toplevel_doesnt_populate(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
subq = select([t]).union(select([t]))
stmt = select([t.c.a]).select_from(t.join(subq, t.c.a == subq.c.a))
comp = stmt.compile()
eq_(
comp._create_result_map(),
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)}
)
def test_compound_only_top_populates(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
stmt = select([t.c.a]).union(select([t.c.b]))
comp = stmt.compile()
eq_(
comp._create_result_map(),
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)},
)
def test_label_plus_element(self):
t = Table('t', MetaData(), Column('a', Integer))
l1 = t.c.a.label('bar')
tc = type_coerce(t.c.a, String)
stmt = select([t.c.a, l1, tc])
comp = stmt.compile()
tc_anon_label = comp._create_result_map()['anon_1'][1][0]
eq_(
comp._create_result_map(),
{
'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'bar': ('bar', (l1, 'bar'), l1.type),
'anon_1': (
'%%(%d anon)s' % id(tc),
(tc_anon_label, 'anon_1', tc), tc.type),
},
)
def test_label_conflict_union(self):
t1 = Table('t1', MetaData(), Column('a', Integer),
Column('b', Integer))
t2 = Table('t2', MetaData(), Column('t1_a', Integer))
union = select([t2]).union(select([t2])).alias()
t1_alias = t1.alias()
stmt = select([t1, t1_alias]).select_from(
t1.join(union, t1.c.a == union.c.t1_a)).apply_labels()
comp = stmt.compile()
eq_(
set(comp._create_result_map()),
set(['t1_1_b', 't1_1_a', 't1_a', 't1_b'])
)
is_(
comp._create_result_map()['t1_a'][1][2], t1.c.a
)
def test_insert_with_select_values(self):
astring = Column('a', String)
aint = Column('a', Integer)
m = MetaData()
Table('t1', m, astring)
t2 = Table('t2', m, aint)
stmt = t2.insert().values(a=select([astring])).returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp._create_result_map(),
{'a': ('a', (aint, 'a', 'a'), aint.type)}
)
def test_insert_from_select(self):
astring = Column('a', String)
aint = Column('a', Integer)
m = MetaData()
Table('t1', m, astring)
t2 = Table('t2', m, aint)
stmt = t2.insert().from_select(['a'], select([astring])).\
returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp._create_result_map(),
{'a': ('a', (aint, 'a', 'a'), aint.type)}
)
def test_nested_api(self):
from sqlalchemy.engine.result import ResultMetaData
stmt2 = select([table2])
stmt1 = select([table1]).select_from(stmt2)
contexts = {}
int_ = Integer()
class MyCompiler(compiler.SQLCompiler):
def visit_select(self, stmt, *arg, **kw):
if stmt is stmt2:
with self._nested_result() as nested:
contexts[stmt2] = nested
text = super(MyCompiler, self).visit_select(stmt2)
self._add_to_result_map("k1", "k1", (1, 2, 3), int_)
else:
text = super(MyCompiler, self).visit_select(
stmt, *arg, **kw)
self._add_to_result_map("k2", "k2", (3, 4, 5), int_)
return text
comp = MyCompiler(default.DefaultDialect(), stmt1)
eq_(
ResultMetaData._create_result_map(contexts[stmt2][0]),
{
'otherid': (
'otherid',
(table2.c.otherid, 'otherid', 'otherid'),
table2.c.otherid.type),
'othername': (
'othername',
(table2.c.othername, 'othername', 'othername'),
table2.c.othername.type),
'k1': ('k1', (1, 2, 3), int_)
}
)
eq_(
comp._create_result_map(),
{
'myid': (
'myid',
(table1.c.myid, 'myid', 'myid'), table1.c.myid.type
),
'k2': ('k2', (3, 4, 5), int_),
'name': (
'name', (table1.c.name, 'name', 'name'),
table1.c.name.type),
'description': (
'description',
(table1.c.description, 'description', 'description'),
table1.c.description.type)}
)
def test_select_wraps_for_translate_ambiguity(self):
# test for issue #3657
t = table('a', column('x'), column('y'), column('z'))
l1, l2, l3 = t.c.z.label('a'), t.c.x.label('b'), t.c.x.label('c')
orig = [t.c.x, t.c.y, l1, l2, l3]
stmt = select(orig)
wrapped = stmt._generate()
wrapped = wrapped.column(
func.ROW_NUMBER().over(order_by=t.c.z)).alias()
wrapped_again = select([c for c in wrapped.c])
compiled = wrapped_again.compile(
compile_kwargs={'select_wraps_for': stmt})
proxied = [obj[0] for (k, n, obj, type_) in compiled._result_columns]
for orig_obj, proxied_obj in zip(
orig,
proxied
):
is_(orig_obj, proxied_obj)
def test_select_wraps_for_translate_ambiguity_dupe_cols(self):
# test for issue #3657
t = table('a', column('x'), column('y'), column('z'))
l1, l2, l3 = t.c.z.label('a'), t.c.x.label('b'), t.c.x.label('c')
orig = [t.c.x, t.c.y, l1, l2, l3]
# create the statement with some duplicate columns. right now
# the behavior is that these redundant columns are deduped.
stmt = select([t.c.x, t.c.y, l1, t.c.y, l2, t.c.x, l3])
# so the statement has 7 inner columns...
eq_(len(list(stmt.inner_columns)), 7)
# but only exposes 5 of them, the other two are dupes of x and y
eq_(len(stmt.c), 5)
# and when it generates a SELECT it will also render only 5
eq_(len(stmt._columns_plus_names), 5)
wrapped = stmt._generate()
wrapped = wrapped.column(
func.ROW_NUMBER().over(order_by=t.c.z)).alias()
# so when we wrap here we're going to have only 5 columns
wrapped_again = select([c for c in wrapped.c])
# so the compiler logic that matches up the "wrapper" to the
# "select_wraps_for" can't use inner_columns to match because
# these collections are not the same
compiled = wrapped_again.compile(
compile_kwargs={'select_wraps_for': stmt})
proxied = [obj[0] for (k, n, obj, type_) in compiled._result_columns]
for orig_obj, proxied_obj in zip(
orig,
proxied
):
is_(orig_obj, proxied_obj)
| sandan/sqlalchemy | test/sql/test_compiler.py | Python | mit | 148,745 |
# coding: utf-8
from django.views.generic import CreateView, UpdateView, DeleteView
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.template import RequestContext
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
try:
import json
except ImportError:
from django.utils import simplejson as json
class JSONResponseMixin(object):
"""
This is a slightly modified version from django-braces project
(https://github.com/brack3t/django-braces)
"""
content_type = None
json_dumps_kwargs = None
def get_content_type(self):
return self.content_type or u"application/json"
def get_json_dumps_kwargs(self):
if self.json_dumps_kwargs is None:
self.json_dumps_kwargs = {}
self.json_dumps_kwargs.setdefault(u'ensure_ascii', False)
return self.json_dumps_kwargs
def render_json_response(self, context_dict, status=200):
"""
Limited serialization for shipping plain data. Do not use for models
or other complex or custom objects.
"""
json_context = json.dumps(
context_dict,
cls=DjangoJSONEncoder,
**self.get_json_dumps_kwargs()
).encode(u'utf-8')
return HttpResponse(
json_context,
content_type=self.get_content_type(),
status=status
)
class AjaxFormMixin(JSONResponseMixin):
message_template = None
def pre_save(self):
pass
def post_save(self):
pass
def form_valid(self, form):
"""
If the request is ajax, save the form and return a json response.
Otherwise return super as expected.
"""
self.object = form.save(commit=False)
self.pre_save()
self.object.save()
if hasattr(form, 'save_m2m'):
form.save_m2m()
self.post_save()
if self.request.is_ajax():
return self.render_json_response(self.get_success_result())
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""
We have errors in the form. If ajax, return them as json.
Otherwise, proceed as normal.
"""
if self.request.is_ajax():
return self.render_json_response(self.get_error_result(form))
return super(AjaxFormMixin, self).form_invalid(form)
def get_message_template_context(self):
return {
'instance': self.object,
'object': self.object
}
def get_message_template_html(self):
return render_to_string(
self.message_template,
self.get_message_template_context(),
context_instance=RequestContext(self.request)
)
def get_response_message(self):
message = ''
if self.message_template:
message = self.get_message_template_html()
return message
def get_success_result(self):
return {'status': 'ok', 'message': self.get_response_message()}
def get_error_result(self, form):
html = render_to_string(
self.template_name,
self.get_context_data(form=form),
context_instance=RequestContext(self.request)
)
return {'status': 'error', 'message': html}
DEFAULT_FORM_TEMPLATE = getattr(settings, "FM_DEFAULT_FORM_TEMPLATE", "fm/form.html")
class AjaxCreateView(AjaxFormMixin, CreateView):
template_name = DEFAULT_FORM_TEMPLATE
class AjaxUpdateView(AjaxFormMixin, UpdateView):
template_name = DEFAULT_FORM_TEMPLATE
class AjaxDeleteView(JSONResponseMixin, DeleteView):
def pre_delete(self):
pass
def post_delete(self):
pass
def get_success_result(self):
return {'status': 'ok'}
def delete(self, request, *args, **kwargs):
"""
The same logic as in DeleteView but some hooks and
JSON response in case of AJAX request
"""
self.object = self.get_object()
self.pre_delete()
self.object.delete()
self.post_delete()
if self.request.is_ajax():
return self.render_json_response(self.get_success_result())
success_url = self.get_success_url()
return HttpResponseRedirect(success_url) | kobox/achilles.pl | src/static/fm/views.py | Python | mit | 4,377 |
# Copyright (c) 2015 Boocock James <[email protected]>
# Author: Boocock James <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from fine_mapping_pipeline.finemap.finemap import run_finemap, remove_surrogates, _write_matrix, _write_zscores
import logging
logging.basicConfig(level=logging.INFO)
def test_remove_surrogate(tmpdir):
input_matrix = 'tests/finemap_data/test.matrix'
input_zscore = 'tests/finemap_data/test.Z'
surrogates_out = 'tests/finemap_data/out.surro'
(matrix, zscores) = remove_surrogates(input_matrix,input_zscore, surrogates_out)
_write_matrix(matrix, "tests/finemap_data/out.matrix")
_write_zscores(zscores, "tests/finemap_data/out.zscores")
    assert 1 == 2  # placeholder assertion: this test always fails as written
| theboocock/fine_mapping_pipeline | tests/test_run_finemap.py | Python | mit | 1,755 |
import os
import json
import six
from ddt import ddt, data, file_data, is_hash_randomized
from nose.tools import assert_equal, assert_is_not_none, assert_raises
@ddt
class Dummy(object):
"""
Dummy class to test the data decorator on
"""
@data(1, 2, 3, 4)
def test_something(self, value):
return value
@ddt
class DummyInvalidIdentifier():
"""
    Dummy class to test the data decorator receiving values with characters
    that are invalid in identifiers
"""
@data('32v2 g #Gmw845h$W b53wi.')
def test_data_with_invalid_identifier(self, value):
return value
@ddt
class FileDataDummy(object):
"""
Dummy class to test the file_data decorator on
"""
@file_data("test_data_dict.json")
def test_something_again(self, value):
return value
@ddt
class FileDataMissingDummy(object):
"""
Dummy class to test the file_data decorator on when
JSON file is missing
"""
@file_data("test_data_dict_missing.json")
def test_something_again(self, value):
return value
def test_data_decorator():
"""
Test the ``data`` method decorator
"""
def hello():
pass
pre_size = len(hello.__dict__)
keys = set(hello.__dict__.keys())
data_hello = data(1, 2)(hello)
dh_keys = set(data_hello.__dict__.keys())
post_size = len(data_hello.__dict__)
assert_equal(post_size, pre_size + 1)
extra_attrs = dh_keys - keys
assert_equal(len(extra_attrs), 1)
extra_attr = extra_attrs.pop()
assert_equal(getattr(data_hello, extra_attr), (1, 2))
def test_file_data_decorator_with_dict():
"""
Test the ``file_data`` method decorator
"""
def hello():
pass
pre_size = len(hello.__dict__)
keys = set(hello.__dict__.keys())
data_hello = data("test_data_dict.json")(hello)
dh_keys = set(data_hello.__dict__.keys())
post_size = len(data_hello.__dict__)
assert_equal(post_size, pre_size + 1)
extra_attrs = dh_keys - keys
assert_equal(len(extra_attrs), 1)
extra_attr = extra_attrs.pop()
assert_equal(getattr(data_hello, extra_attr), ("test_data_dict.json",))
def is_test(x):
    return x.startswith('test_')
def test_ddt():
"""
Test the ``ddt`` class decorator
"""
tests = len(list(filter(is_test, Dummy.__dict__)))
assert_equal(tests, 4)
def test_file_data_test_creation():
"""
Test that the ``file_data`` decorator creates two tests
"""
tests = len(list(filter(is_test, FileDataDummy.__dict__)))
assert_equal(tests, 2)
def test_file_data_test_names_dict():
"""
Test that ``file_data`` creates tests with the correct name
    Name is the function name plus the key in the JSON data,
when it is parsed as a dictionary.
"""
tests = set(filter(is_test, FileDataDummy.__dict__))
tests_dir = os.path.dirname(__file__)
test_data_path = os.path.join(tests_dir, 'test_data_dict.json')
test_data = json.loads(open(test_data_path).read())
created_tests = set([
"test_something_again_{0}_{1}".format(index + 1, name)
for index, name in enumerate(test_data.keys())
])
assert_equal(tests, created_tests)
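# Illustration (hypothetical fixture, not the real test_data_dict.json): a file
# containing {"positive": [10, 12], "negative": [15]} would yield one test per
# key, named "<function name>_<1-based index>_<key>", e.g.
# test_something_again_1_positive.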
def test_feed_data_data():
"""
Test that data is fed to the decorated tests
"""
tests = filter(is_test, Dummy.__dict__)
values = []
obj = Dummy()
for test in tests:
method = getattr(obj, test)
values.append(method())
assert_equal(set(values), set([1, 2, 3, 4]))
def test_feed_data_file_data():
"""
Test that data is fed to the decorated tests from a file
"""
tests = filter(is_test, FileDataDummy.__dict__)
values = []
obj = FileDataDummy()
for test in tests:
method = getattr(obj, test)
values.extend(method())
assert_equal(set(values), set([10, 12, 15, 15, 12, 50]))
def test_feed_data_file_data_missing_json():
"""
Test that a ValueError is raised
"""
tests = filter(is_test, FileDataMissingDummy.__dict__)
obj = FileDataMissingDummy()
for test in tests:
method = getattr(obj, test)
assert_raises(ValueError, method)
def test_ddt_data_name_attribute():
"""
Test the ``__name__`` attribute handling of ``data`` items with ``ddt``
"""
def hello():
pass
class Myint(int):
pass
class Mytest(object):
pass
d1 = Myint(1)
d1.__name__ = 'data1'
d2 = Myint(2)
data_hello = data(d1, d2)(hello)
setattr(Mytest, 'test_hello', data_hello)
ddt_mytest = ddt(Mytest)
assert_is_not_none(getattr(ddt_mytest, 'test_hello_1_data1'))
assert_is_not_none(getattr(ddt_mytest, 'test_hello_2_2'))
def test_ddt_data_unicode():
"""
Test that unicode strings are converted to function names correctly
"""
def hello():
pass
# We test unicode support separately for python 2 and 3
if six.PY2:
@ddt
class Mytest(object):
@data(u'ascii', u'non-ascii-\N{SNOWMAN}', {u'\N{SNOWMAN}': 'data'})
def test_hello(self, val):
pass
assert_is_not_none(getattr(Mytest, 'test_hello_1_ascii'))
assert_is_not_none(getattr(Mytest, 'test_hello_2_non_ascii__u2603'))
if is_hash_randomized():
assert_is_not_none(getattr(Mytest, 'test_hello_3'))
else:
assert_is_not_none(getattr(Mytest,
'test_hello_3__u__u2603____data__'))
elif six.PY3:
@ddt
class Mytest(object):
@data('ascii', 'non-ascii-\N{SNOWMAN}', {'\N{SNOWMAN}': 'data'})
def test_hello(self, val):
pass
assert_is_not_none(getattr(Mytest, 'test_hello_1_ascii'))
assert_is_not_none(getattr(Mytest, 'test_hello_2_non_ascii__'))
if is_hash_randomized():
assert_is_not_none(getattr(Mytest, 'test_hello_3'))
else:
assert_is_not_none(getattr(Mytest, 'test_hello_3________data__'))
def test_feed_data_with_invalid_identifier():
"""
Test that data is fed to the decorated tests
"""
tests = list(filter(is_test, DummyInvalidIdentifier.__dict__))
assert_equal(len(tests), 1)
obj = DummyInvalidIdentifier()
method = getattr(obj, tests[0])
assert_equal(
method.__name__,
'test_data_with_invalid_identifier_1_32v2_g__Gmw845h_W_b53wi_'
)
assert_equal(method(), '32v2 g #Gmw845h$W b53wi.')
| domidimi/ddt | test/test_functional.py | Python | mit | 6,480 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# License: MIT. See LICENSE
# import frappe
import unittest
class TestModuleOnboarding(unittest.TestCase):
pass
| frappe/frappe | frappe/desk/doctype/module_onboarding/test_module_onboarding.py | Python | mit | 197 |
#!/usr/bin/env python
from icqsol.bem.icqPotentialIntegrals import PotentialIntegrals
from icqsol.bem.icqLaplaceMatrices import getFullyQualifiedSharedLibraryName
import numpy
def testObserverOnA(order):
paSrc = numpy.array([0., 0., 0.])
pbSrc = numpy.array([1., 0., 0.])
pcSrc = numpy.array([0., 1., 0.])
xObs = paSrc
integral = PotentialIntegrals(xObs, pbSrc, pcSrc, order).getIntegralOneOverR()
exact = numpy.sqrt(2.) * numpy.arcsinh(1.)
print('testObserverOnA: order = {0} integral = {1} exact = {2} error = {3}'.format(\
order, integral, exact, integral - exact))
def testObserverOnB(order):
paSrc = numpy.array([0., 0., 0.])
pbSrc = numpy.array([1., 0., 0.])
pcSrc = numpy.array([0., 1., 0.])
xObs = pbSrc
integral = PotentialIntegrals(xObs, pcSrc, paSrc, order).getIntegralOneOverR()
exact = numpy.arcsinh(1.)
print('testObserverOnB: order = {0} integral = {1} exact = {2} error = {3}'.format(\
order, integral, exact, integral - exact))
def testObserverOnC(order):
paSrc = numpy.array([0., 0., 0.])
pbSrc = numpy.array([1., 0., 0.])
pcSrc = numpy.array([0., 1., 0.])
xObs = pcSrc
integral = PotentialIntegrals(xObs, paSrc, pbSrc, order).getIntegralOneOverR()
exact = numpy.arcsinh(1.)
print('testObserverOnC: order = {0} integral = {1} exact = {2} error = {3}'.format(\
order, integral, exact, integral - exact))
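# Sanity derivation of the reference values above (sketch, not from the
# original file): the source triangle is the unit right triangle
# (0,0)-(1,0)-(0,1).
# Observer at the right-angle vertex A: in polar coordinates the hypotenuse
# x + y = 1 is r(t) = 1/(cos t + sin t), so
#     integral of 1/r over the triangle = int_0^{pi/2} dt/(cos t + sin t)
#                                       = sqrt(2)*ln(1 + sqrt(2)) = sqrt(2)*asinh(1).
# Observer at an acute vertex (B or C): substituting u = 1 - x gives
#     int_0^1 [asinh(y/u)]_0^u du = asinh(1) = ln(1 + sqrt(2)) ~ 0.8814.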
def testOffDiagonal2Triangles():
import vtk
import sys
import pkg_resources
PY_MAJOR_VERSION = sys.version_info[0]
from ctypes import cdll, c_long, POINTER, c_double
pdata = vtk.vtkPolyData()
points = vtk.vtkPoints()
points.SetNumberOfPoints(4)
points.SetPoint(0, (0., 0., 0.))
points.SetPoint(1, (1., 0., 0.))
points.SetPoint(2, (1., 1., 0.))
points.SetPoint(3, (0., 1., 0.))
pdata.SetPoints(points)
pdata.Allocate(2, 1)
ptIds = vtk.vtkIdList()
ptIds.SetNumberOfIds(3)
ptIds.SetId(0, 0); ptIds.SetId(1, 1); ptIds.SetId(2, 3)
pdata.InsertNextCell(vtk.VTK_POLYGON, ptIds)
ptIds.SetId(0, 1); ptIds.SetId(1, 2); ptIds.SetId(2, 3)
pdata.InsertNextCell(vtk.VTK_POLYGON, ptIds)
addr = int(pdata.GetAddressAsString('vtkPolyData')[5:], 0)
gMat = numpy.zeros((2,2), numpy.float64)
if PY_MAJOR_VERSION < 3:
fullyQualifiedLibName = pkg_resources.resource_filename('icqsol', 'icqLaplaceMatricesCpp.so')
else:
libName = pkg_resources.resource_filename('icqsol', 'icqLaplaceMatricesCpp')
fullyQualifiedLibName = getFullyQualifiedSharedLibraryName(libName)
lib = cdll.LoadLibrary(fullyQualifiedLibName)
lib.computeOffDiagonalTerms(c_long(addr),
gMat.ctypes.data_as(POINTER(c_double)))
exact = numpy.array([[0, -0.07635909342383773],[-0.07635909342383773, 0]])
print(gMat)
print(exact)
print('error: {0}'.format(gMat - exact))
if __name__ == '__main__':
for order in range(1, 6):
testObserverOnA(order)
testObserverOnB(order)
testObserverOnC(order)
print('-'*80)
testOffDiagonal2Triangles()
| gregvonkuster/icqsol | tests/testPotentialIntegrals.py | Python | mit | 3,128 |
#!/usr/bin/python
#encoding=utf-8
import urllib, urllib2
import cookielib
import re
import time
from random import random
from json import dumps as json_dumps, loads as json_loads
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
project_root_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.append(project_root_path)
from logger.logger import logFactory
logger = logFactory.getLogger(__name__)
class MiaoZuan(object):
"""docstring for MiaoZuan"""
def __init__(self, account_file):
super(MiaoZuan, self).__init__()
self.headers = headers = {
'User-Agent':'IOS_8.1_IPHONE5C',
'm-lng':'113.331639',
'm-ct':'2',
'm-lat':'23.158624',
'm-cw':'320',
'm-iv':'3.0.1',
'm-ch':'568',
'm-cv':'6.5.2',
'm-lt':'1',
'm-nw':'WIFI',
#'Content-Type':'application/json;charset=utf-8'
}
self.accountList = self.get_account_List(account_file)
def get_account_List(self, account_file):
accountList = []
try:
with open(account_file, 'r') as f:
lines = f.readlines()
for line in lines:
user, userName, passWord, imei = line.strip('\n').split(',')
accountList.append([user, userName, passWord, imei])
except Exception as e:
logger.exception(e)
finally:
return accountList
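    # Expected Accounts.dat layout, one account per line (values below are
    # made-up placeholders): "<label>,<userName>,<passWord>,<imei>", e.g.
    #   alice,13800000000,secret,012345678901234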
def login(self, userName, passWord, imei):
postdata = urllib.urlencode({
'UserName':userName,
'Password':passWord,
'Imei':imei
})
req = urllib2.Request(
url='http://service.inkey.com/api/Auth/Login',
data=postdata,
headers=self.headers
)
cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
try:
content = urllib2.urlopen(req).read()
resp_dict = json_loads(content)
return resp_dict
except Exception as e:
logger.exception(e)
return {"IsSuccess": False, "Desc": ""}
def pull_SilverAdvert_List(self, categoryId):
postdata = urllib.urlencode({
'CategoryIds':categoryId
})
req = urllib2.Request(
url='http://service.inkey.com/api/SilverAdvert/Pull',
data = postdata,
headers = self.headers
)
try:
content = urllib2.urlopen(req).read()
silverAdvert_pat = re.compile(r'"Id":(.*?),')
silverAdvert_list = re.findall(silverAdvert_pat, content)
logger.debug("categoryId = %s, pull_SilverAdvert_List = %s", categoryId, silverAdvert_list)
except Exception as e:
logger.exception(e)
silverAdvert_list = []
return silverAdvert_list
def viewOne_SilverAdvert_by_advertsID(self, advertsID):
postdata = urllib.urlencode({
'IsGame':"false",
"Id":advertsID
})
req = urllib2.Request(
url='http://service.inkey.com/api/SilverAdvert/GeneratedIntegral',
data = postdata,
headers = self.headers
)
try:
content = urllib2.urlopen(req).read()
logger.debug("view advert id = %s, Response from the server: %s", advertsID, content)
resp_dict = json_loads(content)
return resp_dict
except Exception as e:
logger.exception(e)
return {"IsSuccess": False}
def viewAll_SilverAdverts_by_categoryId(self, categoryId):
silverAdsList = self.pull_SilverAdvert_List(categoryId)
silverAdsList_Count = len(silverAdsList)
total_data_by_categoryId = 0
result_Code = 0
result_Code_31303_count = 0
selectNum = 0
if silverAdsList_Count > 0:
while True:
advertsID = silverAdsList[selectNum]
resp_dict = self.viewOne_SilverAdvert_by_advertsID(advertsID)
selectNum += 1
if selectNum >= silverAdsList_Count:
selectNum -= silverAdsList_Count
if resp_dict["IsSuccess"]:
total_data_by_categoryId += resp_dict["Data"]
logger.debug("get %s more points", resp_dict["Data"])
elif resp_dict["Code"] == 31303:
logger.debug("view advert id = %s, Response from the server: %s", advertsID, resp_dict["Desc"])
result_Code_31303_count += 1
continue
elif resp_dict["Code"] == 31307 or result_Code_31303_count > silverAdsList_Count:
logger.debug("Response from the server: %s", resp_dict["Desc"])
break
time.sleep(12+3*random())
logger.info("categoryId = %s, total_data_by_categoryId = %s" % (categoryId, total_data_by_categoryId))
return [result_Code, total_data_by_categoryId]
def get_all_silvers(self):
total_data = 0
result_Code = 0
categoryIds = [-1, 1, -2, 2, -3, 3, -4, 4, 5, 6, 10]
categoryIds_Count = len(categoryIds)
i = 0
        List_Count_equals_0 = 0  # if 12 consecutive pulls return zero adverts, switch to the next account
        while result_Code != 31307 and List_Count_equals_0 < 12:  # the server code is compared as an integer
categoryId = categoryIds[i]
[result_Code, data_by_categoryId] = self.viewAll_SilverAdverts_by_categoryId(categoryId)
total_data += data_by_categoryId
if result_Code == 0:
List_Count_equals_0 += 1
i += 1
if i >= categoryIds_Count:
i -= categoryIds_Count
return total_data
def start(self):
for account in self.accountList:
user, userName, passWord, imei = account
logger.info("User Iteration Started: %s", user)
login_result_dict = self.login(userName, passWord, imei)
if login_result_dict["IsSuccess"]:
try:
total_data_by_all_categoryIds = self.get_all_silvers()
logger.debug("total_data_by_all_categoryIds: %s" % total_data_by_all_categoryIds)
except Exception as e:
logger.exception(e)
finally:
logger.info("User Iteration Ended: %s", user)
else:
logger.warning("Login failed, login user: %s, error description: %s", user, login_result_dict["Desc"])
logger.info("---------------------------------------------------\n")
def run_forever(self):
while True:
self.start()
time.sleep(4*3600)
if __name__ == '__main__':
account_file = os.path.join(project_root_path, 'Config', 'Accounts.dat')
mz = MiaoZuan(account_file)
mz.run_forever()
| debugtalk/MiaoZuanScripts | MiaoZuanScripts.py | Python | mit | 7,305 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import string
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('subdir', 'sub2')
test.write('build.py', r"""
import sys
contents = open(sys.argv[2], 'rb').read() + open(sys.argv[3], 'rb').read()
file = open(sys.argv[1], 'wb')
file.write(contents)
file.close()
""")
test.write('SConstruct', """
Foo = Builder(action = r'%(_python_)s build.py $TARGET $SOURCES subdir/foo.dep')
Bar = Builder(action = r'%(_python_)s build.py $TARGET $SOURCES subdir/bar.dep')
env = Environment(BUILDERS = { 'Foo' : Foo, 'Bar' : Bar }, SUBDIR='subdir')
env.ParseDepends('foo.d')
env.ParseDepends('bar.d')
env.Foo(target = 'f1.out', source = 'f1.in')
env.Foo(target = 'f2.out', source = 'f2.in')
env.Bar(target = 'subdir/f3.out', source = 'f3.in')
SConscript('subdir/SConscript', "env")
env.Foo(target = 'f5.out', source = 'f5.in')
env.Bar(target = 'sub2/f6.out', source = 'f6.in')
""" % locals())
test.write('foo.d', "f1.out f2.out: %s\n" % os.path.join('subdir', 'foo.dep'))
test.write('bar.d', "%s: %s\nf5.out: sub2" % (os.path.join('subdir', 'f3.out'),
os.path.join('subdir', 'bar.dep')))
test.write(['subdir', 'SConscript'], """
Import("env")
ParseDepends('bar.d')
env.Bar(target = 'f4.out', source = 'f4.in')
""")
test.write(['subdir', 'bar.d'], "f4.out: bar.dep\n")
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.write('f3.in', "f3.in\n")
test.write(['subdir', 'f4.in'], "subdir/f4.in\n")
test.write('f5.in', "f5.in\n")
test.write('f6.in', "f6.in\n")
test.write(['subdir', 'foo.dep'], "subdir/foo.dep 1\n")
test.write(['subdir', 'bar.dep'], "subdir/bar.dep 1\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 1\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 1\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 1\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 1\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 1\n")
test.must_match(['sub2', 'f6.out'], "f6.in\nsubdir/bar.dep 1\n")
#
test.write(['subdir', 'foo.dep'], "subdir/foo.dep 2\n")
test.write(['subdir', 'bar.dep'], "subdir/bar.dep 2\n")
test.write('f6.in', "f6.in 2\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 2\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 2\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 2\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 2\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n")
test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n")
#
test.write(['subdir', 'foo.dep'], "subdir/foo.dep 3\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 2\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 2\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n")
test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n")
#
test.write(['subdir', 'bar.dep'], "subdir/bar.dep 3\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 3\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 3\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n")
test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n")
#
test.write('f6.in', "f6.in 3\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 3\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 3\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 3\n")
test.must_match(['sub2', 'f6.out'], "f6.in 3\nsubdir/bar.dep 3\n")
test.write('SConstruct', """
ParseDepends('nonexistent_file')
""")
test.run()
test.write('SConstruct', """
ParseDepends('nonexistent_file', must_exist=1)
""")
test.run(status=2, stderr=None)
test.fail_test(string.find(test.stderr(), "No such file or directory") == -1)
test.pass_test()
| datalogics/scons | test/ParseDepends.py | Python | mit | 5,431 |
"""Tests for base extension."""
import unittest
from grow.extensions import base_extension
class BaseExtensionTestCase(unittest.TestCase):
"""Test the base extension."""
def test_config_disabled(self):
"""Uses the disabled config."""
ext = base_extension.BaseExtension(None, {
'disabled': [
'a',
],
'enabled': [
'a',
],
})
self.assertFalse(ext.hooks.is_enabled('a'))
self.assertFalse(ext.hooks.is_enabled('b'))
def test_config_enabled(self):
"""Uses the enabled config."""
ext = base_extension.BaseExtension(None, {
'enabled': [
'a',
],
})
self.assertTrue(ext.hooks.is_enabled('a'))
self.assertFalse(ext.hooks.is_enabled('b'))
| grow/pygrow | grow/extensions/base_extension_test.py | Python | mit | 844 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# isprime.py
#
# Author: Billy Wilson Arante
# Created: 2016/06/16 PHT
# Modified: 2016/10/01 EDT (America/New York)
from sys import argv
def isprime(x):
"""Checks if x is prime number
Returns true if x is a prime number, otherwise false.
"""
if x <= 1:
return False
for n in range(2, (x - 1)):
if x % n == 0:
return False
return True
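# Illustrative results of the helper above: isprime(7) -> True,
# isprime(10) -> False, isprime(1) -> False (values <= 1 are not prime).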
def main():
"""Main"""
filename, number = argv
print isprime(int(number))
if __name__ == "__main__":
main()
| arantebillywilson/python-snippets | py2/cool-things/isprime.py | Python | mit | 565 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from holmes.validators.base import Validator
from holmes.utils import _
class ImageAltValidator(Validator):
@classmethod
def get_without_alt_parsed_value(cls, value):
result = []
for src, name in value:
data = '<a href="%s" target="_blank">%s</a>' % (src, name)
result.append(data)
return {'images': ', '.join(result)}
@classmethod
def get_alt_too_big_parsed_value(cls, value):
result = []
for src, name, alt in value['images']:
data = u'<a href="{}" alt="{}" target="_blank">{}</a>'.format(
src, alt, name
)
result.append(data)
return {
'max_size': value['max_size'],
'images': ', '.join(result)
}
@classmethod
def get_violation_definitions(cls):
return {
'invalid.images.alt': {
'title': _('Image(s) without alt attribute'),
'description': _(
'Images without alt text are not good for '
'Search Engines. Images without alt were '
'found for: %(images)s.'),
'value_parser': cls.get_without_alt_parsed_value,
'category': _('SEO'),
'generic_description': _(
'Images without alt attribute are not good for '
'search engines. They are searchable by the content '
'of this attribute, so if it\'s empty, it cause bad '
'indexing optimization.'
)
},
'invalid.images.alt_too_big': {
'title': _('Image(s) with alt attribute too big'),
'description': _(
'Images with alt text bigger than %(max_size)d chars are '
'not good for search engines. Images with a too big alt '
'were found for: %(images)s.'),
'value_parser': cls.get_alt_too_big_parsed_value,
'category': _('SEO'),
'generic_description': _(
'Images with alt text too long are not good to SEO. '
'This maximum value are configurable '
'by Holmes configuration.'
),
'unit': 'number'
}
}
@classmethod
def get_default_violations_values(cls, config):
return {
'invalid.images.alt_too_big': {
'value': config.MAX_IMAGE_ALT_SIZE,
'description': config.get_description('MAX_IMAGE_ALT_SIZE')
}
}
def validate(self):
max_alt_size = self.get_violation_pref('invalid.images.alt_too_big')
imgs = self.get_imgs()
result_no_alt = []
result_alt_too_big = []
for img in imgs:
src = img.get('src')
if not src:
continue
src = self.normalize_url(src)
img_alt = img.get('alt')
if src:
name = src.rsplit('/', 1)[-1]
if not img_alt:
result_no_alt.append((src, name))
elif len(img_alt) > max_alt_size:
result_alt_too_big.append((src, name, img_alt))
if result_no_alt:
self.add_violation(
key='invalid.images.alt',
value=result_no_alt,
points=20 * len(result_no_alt)
)
if result_alt_too_big:
self.add_violation(
key='invalid.images.alt_too_big',
value={
'images': result_alt_too_big,
'max_size': max_alt_size
},
points=20 * len(result_alt_too_big)
)
def get_imgs(self):
return self.review.data.get('page.all_images', None)
| holmes-app/holmes-api | holmes/validators/image_alt.py | Python | mit | 3,935 |
#!/usr/bin/env python
#coding: utf-8
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a list of integers
    def postorderTraversal(self, root):
        # Standard iterative approach: pop nodes from a stack so they are
        # visited root-right-left, then reverse the result to obtain
        # postorder (left-right-root).
        if root is None:
            return []
        result = []
        stack = [root]
        while stack:
            node = stack.pop()
            result.append(node.val)
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)
        return result[::-1]
| wh-acmer/minixalpha-acm | LeetCode/Python/binary_tree_postorder_traversal_iter.py | Python | mit | 324 |
from .models import Project,Member,Contact,Technology,Contributor
from rest_framework import serializers
class ContactSerializer(serializers.ModelSerializer):
class Meta:
model = Contact
fields = ('name', 'link')
class MemberSerializer(serializers.ModelSerializer):
contacts = ContactSerializer(many=True)
class Meta:
model = Member
fields = ('name', 'post', 'img', 'contacts')
class ContributorSerializer(serializers.ModelSerializer):
class Meta:
model=Contributor
fields = ('name','github')
class ProjectSerializer(serializers.ModelSerializer):
contributors=ContributorSerializer(many=True)
class Meta:
model = Project
        fields = ('slug','name','type','desc','icon','technologies','long_desc','contributors','meta')
| o-d-i-n/HelloWorld | api/serializers.py | Python | mit | 778 |
from . animation import Animation
from .. layout import strip
class Strip(Animation):
LAYOUT_CLASS = strip.Strip
LAYOUT_ARGS = 'num',
def __init__(self, layout, start=0, end=-1, **kwds):
super().__init__(layout, **kwds)
self._start = max(start, 0)
self._end = end
if self._end < 0 or self._end >= self.layout.numLEDs:
self._end = self.layout.numLEDs - 1
self._size = self._end - self._start + 1
from .. import deprecated
if deprecated.allowed():
BaseStripAnim = Strip
| ManiacalLabs/BiblioPixel | bibliopixel/animation/strip.py | Python | mit | 543 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentProperties(Model):
"""Deployment properties.
:param template: The template content. It can be a JObject or a well
formed JSON string. Use only one of Template or TemplateLink.
:type template: object
:param template_link: The template URI. Use only one of Template or
TemplateLink.
:type template_link: :class:`TemplateLink
<azure.mgmt.resource.resources.v2016_02_01.models.TemplateLink>`
:param parameters: Deployment parameters. It can be a JObject or a well
formed JSON string. Use only one of Parameters or ParametersLink.
:type parameters: object
:param parameters_link: The parameters URI. Use only one of Parameters or
ParametersLink.
:type parameters_link: :class:`ParametersLink
<azure.mgmt.resource.resources.v2016_02_01.models.ParametersLink>`
:param mode: The deployment mode. Possible values include: 'Incremental',
'Complete'
:type mode: str or :class:`DeploymentMode
<azure.mgmt.resource.resources.v2016_02_01.models.DeploymentMode>`
:param debug_setting: The debug setting of the deployment.
:type debug_setting: :class:`DebugSetting
<azure.mgmt.resource.resources.v2016_02_01.models.DebugSetting>`
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'template': {'key': 'template', 'type': 'object'},
'template_link': {'key': 'templateLink', 'type': 'TemplateLink'},
'parameters': {'key': 'parameters', 'type': 'object'},
'parameters_link': {'key': 'parametersLink', 'type': 'ParametersLink'},
'mode': {'key': 'mode', 'type': 'DeploymentMode'},
'debug_setting': {'key': 'debugSetting', 'type': 'DebugSetting'},
}
def __init__(self, mode, template=None, template_link=None, parameters=None, parameters_link=None, debug_setting=None):
self.template = template
self.template_link = template_link
self.parameters = parameters
self.parameters_link = parameters_link
self.mode = mode
self.debug_setting = debug_setting
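# Illustrative construction (placeholder values; 'Incremental'/'Complete'
# are the DeploymentMode values listed in the class docstring above):
#   props = DeploymentProperties(
#       mode='Incremental',
#       template={'$schema': '...', 'resources': []},
#       parameters={'siteName': {'value': 'example'}})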
| v-iam/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/deployment_properties.py | Python | mit | 2,606 |
import enum
class calendar_permissions(enum.IntEnum):
ASCIT = 21
AVERY = 22
BECHTEL = 23
BLACKER = 24
DABNEY = 25
FLEMING = 26
LLOYD = 27
PAGE = 28
RICKETTS = 29
RUDDOCK = 30
OTHER = 31
ATHLETICS = 32
| ASCIT/donut-python | donut/modules/calendar/permissions.py | Python | mit | 251 |
import logging, sys
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
class InfoFilter(logging.Filter):
def filter(self, rec):
return rec.levelno in (logging.DEBUG, logging.INFO)
def _new_custom_logger(name='BiblioPixel',
fmt='%(levelname)s - %(module)s - %(message)s'):
logger = logging.getLogger(name)
formatter = logging.Formatter(fmt=fmt)
if len(logger.handlers) == 0:
logger.setLevel(logging.INFO)
h1 = logging.StreamHandler(sys.stdout)
h1.setLevel(logging.DEBUG)
h1.addFilter(InfoFilter())
h1.setFormatter(formatter)
h2 = logging.StreamHandler(sys.stderr)
h2.setLevel(logging.WARNING)
h2.setFormatter(formatter)
logger.addHandler(h1)
logger.addHandler(h2)
return logger
logger = _new_custom_logger()
setLogLevel = logger.setLevel
debug, info, warning, error, critical, exception = (
logger.debug, logger.info, logger.warning, logger.error, logger.critical,
logger.exception)
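# Illustrative usage of the module-level helpers defined above
# (the default level is INFO, so debug() is suppressed until lowered):
#   setLogLevel(DEBUG)
#   debug("detail")      # -> stdout (InfoFilter passes only DEBUG/INFO)
#   info("progress")     # -> stdout
#   warning("heads up")  # -> stderr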
| sethshill/final | build/lib.linux-armv7l-2.7/bibliopixel/log.py | Python | mit | 1,041 |
def make_colorscale_from_colors(colors):
if len(colors) == 1:
colors *= 2
return tuple((i / (len(colors) - 1), color) for i, color in enumerate(colors))
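# Illustrative behavior: make_colorscale_from_colors(['#000000', '#ffffff'])
# returns ((0.0, '#000000'), (1.0, '#ffffff')); a single color is duplicated
# first so the returned colorscale always has at least two stops.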
| KwatME/plot | plot/make_colorscale_from_colors.py | Python | mit | 172 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_corellia_solitude_medium3.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| anhstudios/swganh | data/scripts/templates/object/building/poi/shared_corellia_solitude_medium3.py | Python | mit | 456 |
class Solution(object):
# def convert(self, s, numRows):
# """
# :type s: str
# :type numRows: int
# :rtype: str
# """
# ls = len(s)
# if ls <= 1 or numRows == 1:
# return s
# temp_s = []
# for i in range(numRows):
# temp_s.append(['']*(ls / 2))
# inter = numRows - 1
# col, row = 0, 0
# for i, ch in enumerate(s):
# flag = True
# if (i / inter) % 2 == 1:
# # print i
# flag = False
# if flag:
# temp_s[row][col] = ch
# row += 1
# else:
# temp_s[row][col] = ch
# col += 1
# row -= 1
# result = ''
# for i in range(numRows):
# result += ''.join(temp_s[i])
# return result
def convert(self, s, numRows):
# https://leetcode.com/discuss/90908/easy-python-o-n-solution-94%25-with-explanations
if numRows == 1:
return s
# calculate period
p = 2 * (numRows - 1)
result = [""] * numRows
for i in xrange(len(s)):
floor = i % p
if floor >= p//2:
floor = p - floor
result[floor] += s[i]
return "".join(result)
if __name__ == '__main__':
# begin
s = Solution()
print s.convert("PAYPALISHIRING", 3)
| qiyuangong/leetcode | python/006_ZigZag_Conversion.py | Python | mit | 1,447 |
"""Representation of Z-Wave binary_sensors."""
from openzwavemqtt.const import CommandClass, ValueIndex, ValueType
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_UNSUBSCRIBE, DOMAIN
from .entity import ZWaveDeviceEntity
NOTIFICATION_TYPE = "index"
NOTIFICATION_VALUES = "values"
NOTIFICATION_DEVICE_CLASS = "device_class"
NOTIFICATION_SENSOR_ENABLED = "enabled"
NOTIFICATION_OFF_VALUE = "off_value"
NOTIFICATION_VALUE_CLEAR = 0
# Translation from values in Notification CC to binary sensors
# https://github.com/OpenZWave/open-zwave/blob/master/config/NotificationCCTypes.xml
NOTIFICATION_SENSORS = [
{
# Index 1: Smoke Alarm - Value Id's 1 and 2
# Assuming here that Value 1 and 2 are not present at the same time
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SMOKE_ALARM,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.SMOKE,
},
{
# Index 1: Smoke Alarm - All other Value Id's
# Create as disabled sensors
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SMOKE_ALARM,
NOTIFICATION_VALUES: [3, 4, 5, 6, 7, 8],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.SMOKE,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 2: Carbon Monoxide - Value Id's 1 and 2
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_MONOOXIDE,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.GAS,
},
{
# Index 2: Carbon Monoxide - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_MONOOXIDE,
NOTIFICATION_VALUES: [4, 5, 7],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.GAS,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 3: Carbon Dioxide - Value Id's 1 and 2
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_DIOXIDE,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.GAS,
},
{
# Index 3: Carbon Dioxide - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CARBON_DIOXIDE,
NOTIFICATION_VALUES: [4, 5, 7],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.GAS,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 4: Heat - Value Id's 1, 2, 5, 6 (heat/underheat)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HEAT,
NOTIFICATION_VALUES: [1, 2, 5, 6],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.HEAT,
},
{
# Index 4: Heat - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HEAT,
NOTIFICATION_VALUES: [3, 4, 8, 10, 11],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.HEAT,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 5: Water - Value Id's 1, 2, 3, 4
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WATER,
NOTIFICATION_VALUES: [1, 2, 3, 4],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.MOISTURE,
},
{
# Index 5: Water - All other Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WATER,
NOTIFICATION_VALUES: [5],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.MOISTURE,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 6: Access Control - Value Id's 1, 2, 3, 4 (Lock)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_ACCESS_CONTROL,
NOTIFICATION_VALUES: [1, 2, 3, 4],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.LOCK,
},
{
# Index 6: Access Control - Value Id 22 (door/window open)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_ACCESS_CONTROL,
NOTIFICATION_VALUES: [22],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.DOOR,
NOTIFICATION_OFF_VALUE: 23,
},
{
# Index 7: Home Security - Value Id's 1, 2 (intrusion)
# Assuming that value 1 and 2 are not present at the same time
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.SAFETY,
},
{
# Index 7: Home Security - Value Id's 3, 4, 9 (tampering)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [3, 4, 9],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.SAFETY,
},
{
# Index 7: Home Security - Value Id's 5, 6 (glass breakage)
# Assuming that value 5 and 6 are not present at the same time
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [5, 6],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.SAFETY,
},
{
# Index 7: Home Security - Value Id's 7, 8 (motion)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_HOME_SECURITY,
NOTIFICATION_VALUES: [7, 8],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.MOTION,
},
{
# Index 8: Power management - Values 1...9
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_POWER_MANAGEMENT,
NOTIFICATION_VALUES: [1, 2, 3, 4, 5, 6, 7, 8, 9],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.POWER,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 8: Power management - Values 10...15
# Battery values (mutually exclusive)
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_POWER_MANAGEMENT,
NOTIFICATION_VALUES: [10, 11, 12, 13, 14, 15],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.POWER,
NOTIFICATION_SENSOR_ENABLED: False,
NOTIFICATION_OFF_VALUE: None,
},
{
# Index 9: System - Value Id's 1, 2, 6, 7
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SYSTEM,
NOTIFICATION_VALUES: [1, 2, 6, 7],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 10: Emergency - Value Id's 1, 2, 3
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_EMERGENCY,
NOTIFICATION_VALUES: [1, 2, 3],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
},
{
# Index 11: Clock - Value Id's 1, 2
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_CLOCK,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: None,
NOTIFICATION_SENSOR_ENABLED: False,
},
{
# Index 12: Appliance - All Value Id's
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_APPLIANCE,
NOTIFICATION_VALUES: [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
],
NOTIFICATION_DEVICE_CLASS: None,
},
{
# Index 13: Home Health - Value Id's 1,2,3,4,5
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_APPLIANCE,
NOTIFICATION_VALUES: [1, 2, 3, 4, 5],
NOTIFICATION_DEVICE_CLASS: None,
},
{
# Index 14: Siren
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_SIREN,
NOTIFICATION_VALUES: [1],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.SOUND,
},
{
# Index 15: Water valve
# ignore non-boolean values
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WATER_VALVE,
NOTIFICATION_VALUES: [3, 4],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
},
{
# Index 16: Weather
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_WEATHER,
NOTIFICATION_VALUES: [1, 2],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
},
{
# Index 17: Irrigation
# ignore non-boolean values
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_IRRIGATION,
NOTIFICATION_VALUES: [1, 2, 3, 4, 5],
NOTIFICATION_DEVICE_CLASS: None,
},
{
# Index 18: Gas
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_GAS,
NOTIFICATION_VALUES: [1, 2, 3, 4],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.GAS,
},
{
# Index 18: Gas
NOTIFICATION_TYPE: ValueIndex.NOTIFICATION_GAS,
NOTIFICATION_VALUES: [6],
NOTIFICATION_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
},
]
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Z-Wave binary_sensor from config entry."""
@callback
def async_add_binary_sensor(values):
"""Add Z-Wave Binary Sensor(s)."""
async_add_entities(VALUE_TYPE_SENSORS[values.primary.type](values))
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(
hass, f"{DOMAIN}_new_{BINARY_SENSOR_DOMAIN}", async_add_binary_sensor
)
)
@callback
def async_get_legacy_binary_sensors(values):
"""Add Legacy/classic Z-Wave Binary Sensor."""
return [ZWaveBinarySensor(values)]
@callback
def async_get_notification_sensors(values):
"""Convert Notification values into binary sensors."""
sensors_to_add = []
for list_value in values.primary.value["List"]:
# check if we have a mapping for this value
for item in NOTIFICATION_SENSORS:
if item[NOTIFICATION_TYPE] != values.primary.index:
continue
if list_value["Value"] not in item[NOTIFICATION_VALUES]:
continue
sensors_to_add.append(
ZWaveListValueSensor(
# required values
values,
list_value["Value"],
item[NOTIFICATION_DEVICE_CLASS],
# optional values
item.get(NOTIFICATION_SENSOR_ENABLED, True),
item.get(NOTIFICATION_OFF_VALUE, NOTIFICATION_VALUE_CLEAR),
)
)
return sensors_to_add
VALUE_TYPE_SENSORS = {
ValueType.BOOL: async_get_legacy_binary_sensors,
ValueType.LIST: async_get_notification_sensors,
}
class ZWaveBinarySensor(ZWaveDeviceEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor."""
@property
def is_on(self):
"""Return if the sensor is on or off."""
return self.values.primary.value
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# Legacy binary sensors are phased out (replaced by notification sensors)
# Disable by default to not confuse users
for item in self.values.primary.node.values():
if item.command_class == CommandClass.NOTIFICATION:
# This device properly implements the Notification CC, legacy sensor can be disabled
return False
return True
class ZWaveListValueSensor(ZWaveDeviceEntity, BinarySensorEntity):
"""Representation of a binary_sensor from values in the Z-Wave Notification CommandClass."""
def __init__(
self,
values,
on_value,
device_class=None,
default_enabled=True,
off_value=NOTIFICATION_VALUE_CLEAR,
):
"""Initialize a ZWaveListValueSensor entity."""
super().__init__(values)
self._on_value = on_value
self._device_class = device_class
self._default_enabled = default_enabled
self._off_value = off_value
# make sure the correct value is selected at startup
self._state = False
self.on_value_update()
@callback
def on_value_update(self):
"""Call when a value is added/updated in the underlying EntityValues Collection."""
if self.values.primary.value["Selected_id"] == self._on_value:
# Only when the active ID exactly matches our watched ON value, set sensor state to ON
self._state = True
elif self.values.primary.value["Selected_id"] == self._off_value:
# Only when the active ID exactly matches our watched OFF value, set sensor state to OFF
self._state = False
elif (
self._off_value is None
and self.values.primary.value["Selected_id"] != self._on_value
):
# Off value not explicitly specified
# Some values are reset by the simple fact they're overruled by another value coming in
# For example the battery charging values in Power Management Index
self._state = False
@property
def name(self):
"""Return the name of the entity."""
# Append value label to base name
base_name = super().name
value_label = ""
for item in self.values.primary.value["List"]:
if item["Value"] == self._on_value:
value_label = item["Label"]
break
# Strip "on location" / "at location" from name
# Note: We're assuming that we don't retrieve 2 values with different location
value_label = value_label.split(" on ")[0]
value_label = value_label.split(" at ")[0]
return f"{base_name}: {value_label}"
@property
def unique_id(self):
"""Return the unique_id of the entity."""
unique_id = super().unique_id
return f"{unique_id}.{self._on_value}"
@property
def is_on(self):
"""Return if the sensor is on or off."""
return self._state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._device_class
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# We hide the more advanced sensors by default to not overwhelm users
return self._default_enabled
| rohitranjan1991/home-assistant | homeassistant/components/ozw/binary_sensor.py | Python | mit | 14,428 |
"""
run quality assurance measures on functional data
"""
import sys,glob
sys.path.append('/corral-repl/utexas/poldracklab/software_lonestar/quality-assessment-protocol')
import os
import numpy
from run_shell_cmd import run_shell_cmd
from compute_fd import compute_fd
from qap import load_func,load_image, load_mask, summary_mask, cnr,efc,fber,fwhm,artifacts,ghost_all,calc_mean_func,calc_dvars,mean_outlier_timepoints,mean_quality_timepoints
basedir='/corral-repl/utexas/poldracklab/data/selftracking/shared_dataset'
funcfiles=glob.glob(os.path.join(basedir,'sub*/BOLD/resting_run001/bold.nii.gz'))
funcdata={'subcode':[],'func_efc':[],'func_fber':[],'func_fwhm':[],'func_gsr':[],'func_dvars':[],'func_outlier':[],'func_quality':[],'func_mean_fd':[],'func_num_fd':[],'func_perc_fd':[]}
#for funcfile in funcfiles:
func_file=funcfiles[0]
if 1:
subcode=func_file.split('/')[7]
print 'processing',subcode
funcdata['subcode'].append(subcode)
mask_file=func_file.replace('.nii.gz','_brain_mask.nii.gz')
if not os.path.exists(mask_file):
cmd='bet %s %s -m -F'%(func_file,func_file.replace('.nii.gz','_brain'))
print cmd
run_shell_cmd(cmd)
func_data = load_func(func_file,mask_file)
mean_func_data = calc_mean_func(func_file)
func_mask = load_mask(mask_file)
func_efc = efc(func_data)
#func_fber = fber(func_data, func_mask)
#func_fwhm = fwhm(func_file, mask_file, out_vox=False)
print 'running ghost_all'
_,func_gsr,_=ghost_all(mean_func_data,func_mask)
print 'running calc_dvars'
func_dvars = calc_dvars(func_data, output_all=False)
print 'running mean_outlier_timepoints'
func_outlier = mean_outlier_timepoints(func_file, mask_file, out_fraction=True)
print 'running compute_fd'
motpars=numpy.loadtxt(func_file.replace('.nii.gz','_mcf.par'))
fd=compute_fd(motpars)
    # store the functional QA metrics computed above under the matching
    # keys defined in funcdata; func_fber, func_fwhm, func_quality,
    # func_num_fd and func_perc_fd are left unfilled because their
    # computations are commented out / not performed above
    funcdata['func_efc'].append(func_efc)
    funcdata['func_gsr'].append(func_gsr)
    funcdata['func_dvars'].append(numpy.mean(func_dvars))
    funcdata['func_outlier'].append(numpy.mean(func_outlier))
    funcdata['func_mean_fd'].append(numpy.mean(fd))
| vsoch/myconnectome | myconnectome/qa/run_qap_func.py | Python | mit | 2,129 |
__version__ = "master"
| jvandijk/pla | pla/version.py | Python | mit | 23 |
#!/usr/bin/python
'''
Created on May 14, 2012
@author: Charlie
'''
import ConfigParser
import boto
import cgitb
cgitb.enable()
class MyClass(object):
def __init__(self, domain):
config = ConfigParser.RawConfigParser()
config.read('.boto')
key = config.get('Credentials', 'aws_access_key_id')
secretKey = config.get('Credentials', 'aws_secret_access_key')
self.conn = boto.connect_sdb(key, secretKey)
self.domain = domain
def showDomains(self):
domains = self.conn.get_all_domains()
print domains
def createDomain(self):
self.conn.create_domain(self.domain)
def addData(self, itemName, itemAttrs):
dom = self.conn.get_domain(self.domain)
item_name = itemName
dom.put_attributes(item_name, itemAttrs)
def startXml(self):
xml = "Content-Type: text/xml\n\n"
xml += "<?xml version='1.0'?>\n"
xml += '<test01 count="5">\n'
return xml
def showQuery(self, query):
dom = self.conn.get_domain(self.domain)
result = dom.select(query)
xml = self.startXml()
for item in result:
xml += "\t<line>\n"
keys = item.keys()
keys.sort()
for x in keys:
xml += '\t\t<' + x + '>' + item[x] + '</' + x + '>\n'
xml += "\t</line>\n"
xml += '</test01>'
return xml
my_class = MyClass("Test01")
# my_class.addData('Line01', {'Field01': 'one', 'Field02': 'two'})
# my_class.showDomains()
print my_class.showQuery('select * from Test01')
| donlee888/JsObjects | Python/Prog282SimpleDb/scripts/simpledb.py | Python | mit | 1,593 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class FlattenedProduct(Resource):
"""FlattenedProduct
:param id: Resource Id
:type id: str
:param type: Resource Type
:type type: str
:param tags:
:type tags: dict
:param location: Resource Location
:type location: str
:param name: Resource Name
:type name: str
:param pname:
:type pname: str
:param flattened_product_type:
:type flattened_product_type: str
:param provisioning_state_values: Possible values include: 'Succeeded',
'Failed', 'canceled', 'Accepted', 'Creating', 'Created', 'Updating',
'Updated', 'Deleting', 'Deleted', 'OK'
:type provisioning_state_values: str
:param provisioning_state:
:type provisioning_state: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'pname': {'key': 'properties.pname', 'type': 'str'},
'flattened_product_type': {'key': 'properties.type', 'type': 'str'},
'provisioning_state_values': {'key': 'properties.provisioningStateValues', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, id=None, type=None, tags=None, location=None, name=None, pname=None, flattened_product_type=None, provisioning_state_values=None, provisioning_state=None, **kwargs):
super(FlattenedProduct, self).__init__(id=id, type=type, tags=tags, location=location, name=name, **kwargs)
self.pname = pname
self.flattened_product_type = flattened_product_type
self.provisioning_state_values = provisioning_state_values
self.provisioning_state = provisioning_state
| jkonecki/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ModelFlattening/autorestresourceflatteningtestservice/models/flattened_product.py | Python | mit | 2,348 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .deployed_service_replica_info import DeployedServiceReplicaInfo
class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo):
"""Information about a stateful service replica deployed on a node.
:param service_name: Full hierarchical name of the service in URI format
starting with `fabric:`.
:type service_name: str
:param service_type_name: Name of the service type as specified in the
service manifest.
:type service_type_name: str
:param service_manifest_name: The name of the service manifest in which
this service type is defined.
:type service_manifest_name: str
:param code_package_name: The name of the code package that hosts this
replica.
:type code_package_name: str
:param partition_id:
:type partition_id: str
:param replica_status: Possible values include: 'Invalid', 'InBuild',
'Standby', 'Ready', 'Down', 'Dropped'
:type replica_status: str
:param address: The last address returned by the replica in Open or
ChangeRole.
:type address: str
:param service_package_activation_id:
:type service_package_activation_id: str
:param ServiceKind: Polymorphic Discriminator
:type ServiceKind: str
:param replica_id: Id of the stateful service replica.
:type replica_id: str
:param replica_role: Possible values include: 'Unknown', 'None',
'Primary', 'IdleSecondary', 'ActiveSecondary'
:type replica_role: str
"""
_validation = {
'ServiceKind': {'required': True},
}
_attribute_map = {
'service_name': {'key': 'ServiceName', 'type': 'str'},
'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'},
'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},
'code_package_name': {'key': 'CodePackageName', 'type': 'str'},
'partition_id': {'key': 'PartitionID', 'type': 'str'},
'replica_status': {'key': 'ReplicaStatus', 'type': 'str'},
'address': {'key': 'Address', 'type': 'str'},
'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'},
'ServiceKind': {'key': 'ServiceKind', 'type': 'str'},
'replica_id': {'key': 'ReplicaId', 'type': 'str'},
'replica_role': {'key': 'ReplicaRole', 'type': 'str'},
}
def __init__(self, service_name=None, service_type_name=None, service_manifest_name=None, code_package_name=None, partition_id=None, replica_status=None, address=None, service_package_activation_id=None, replica_id=None, replica_role=None):
super(DeployedStatefulServiceReplicaInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id)
self.replica_id = replica_id
self.replica_role = replica_role
self.ServiceKind = 'Stateful'
| v-iam/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info.py | Python | mit | 3,502 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
from io import open
import warnings
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.io.vasp import BSVasprun
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine, get_reconstructed_band_structure, \
LobsterBandStructureSymmLine
from pymatgen.util.testing import PymatgenTest
from monty.serialization import loadfn
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")
def test_properties(self):
self.assertEqual(self.kpoint.frac_coords[0], 0.1)
self.assertEqual(self.kpoint.frac_coords[1], 0.4)
self.assertEqual(self.kpoint.frac_coords[2], -0.5)
self.assertEqual(self.kpoint.a, 0.1)
self.assertEqual(self.kpoint.b, 0.4)
self.assertEqual(self.kpoint.c, -0.5)
self.assertEqual(self.lattice, Lattice.cubic(10.0))
self.assertEqual(self.kpoint.cart_coords[0], 1.0)
self.assertEqual(self.kpoint.cart_coords[1], 4.0)
self.assertEqual(self.kpoint.cart_coords[2], -5.0)
self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLine_test(PymatgenTest):
def setUp(self):
self.bs = loadfn(os.path.join(test_dir, "Cu2O_361_bandstructure.json"))
self.bs2 = loadfn(os.path.join(test_dir, "CaO_2605_bandstructure.json"))
self.bs_spin = loadfn(os.path.join(test_dir, "NiO_19009_bandstructure.json"))
self.bs_cbm0 = loadfn(os.path.join(test_dir, "InN_22205_bandstructure.json"))
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_diff_spins = loadfn(os.path.join(test_dir, "VBr2_971787_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_basic(self):
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][10][12][0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][25][0][
Orbital.dyz.value],
[0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069])
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
proj = self.bs.get_projections_on_elements_and_orbitals({'Cu': ['s',
'd']})
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['s'], 0.0027)
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
self.assertEqual(self.bs2.nb_bands, 16)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertEqual(self.bs2.branches[5]['name'], "L-U")
self.assertEqual(self.bs2.branches[5]['start_index'], 80)
self.assertEqual(self.bs2.branches[5]['end_index'], 95)
self.assertAlmostEqual(self.bs2.distance[70], 4.2335127528765737)
self.assertEqual(self.bs_spin.nb_bands, 27)
self.assertAlmostEqual(self.bs_spin.bands[Spin.up][5][10], 0.262)
self.assertAlmostEqual(self.bs_spin.bands[Spin.down][5][10],
1.6156)
def test_properties(self):
self.one_kpoint = self.bs2.kpoints[31]
self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
self.assertEqual(self.one_kpoint.label, "W")
        self.assertAlmostEqual(self.bs2.efermi, 2.6211967, msg="wrong fermi energy")
def test_get_branch(self):
self.assertAlmostEqual(self.bs2.get_branch(110)[0]['name'], "U-W")
def test_get_direct_band_gap_dict(self):
direct_dict = self.bs_diff_spins.get_direct_band_gap_dict()
self.assertEqual(direct_dict[Spin.down]['value'], 4.5365)
for bs in [self.bs2, self.bs_spin]:
dg_dict = bs.get_direct_band_gap_dict()
for spin, v in bs.bands.items():
kpt = dg_dict[spin]['kpoint_index']
vb, cb = dg_dict[spin]['band_indices']
gap = v[cb][kpt] - v[vb][kpt]
self.assertEqual(gap, dg_dict[spin]['value'])
self.assertRaises(ValueError, self.bs_cu.get_direct_band_gap_dict)
def test_get_direct_band_gap(self):
self.assertAlmostEqual(self.bs2.get_direct_band_gap(),
4.0125999999999999)
self.assertTrue(self.bs_diff_spins.get_direct_band_gap() > 0)
self.assertEqual(self.bs_cu.get_direct_band_gap(), 0)
def test_is_metal(self):
self.assertFalse(self.bs2.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
self.assertTrue(self.bs_cu.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs2.get_cbm()
        self.assertAlmostEqual(cbm['energy'], 5.8709, msg="wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
        self.assertAlmostEqual(cbm_spin['energy'], 8.0458, msg="wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs2.get_vbm()
        self.assertAlmostEqual(vbm['energy'], 2.2361, msg="wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
        self.assertAlmostEqual(vbm_spin['energy'], 5.731, msg="wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
def test_get_band_gap(self):
bg = self.bs2.get_band_gap()
        self.assertAlmostEqual(bg['energy'], 3.6348, msg="wrong gap energy")
self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
self.assertFalse(bg['direct'], "wrong nature of the gap")
bg_spin = self.bs_spin.get_band_gap()
        self.assertAlmostEqual(bg_spin['energy'], 2.3148, msg="wrong gap energy")
self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
bg_cbm0 = self.bs_cbm0.get_band_gap()
self.assertAlmostEqual(bg_cbm0['energy'], 0, places=3, msg="wrong gap energy")
def test_get_sym_eq_kpoints_and_degeneracy(self):
bs = self.bs2
cbm_k = bs.get_cbm()['kpoint'].frac_coords
vbm_k = bs.get_vbm()['kpoint'].frac_coords
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), None)
bs.structure = loadfn(os.path.join(test_dir, "CaO_2605_structure.json"))
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 3)
self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 1)
cbm_eqs = bs.get_sym_eq_kpoints(cbm_k)
self.assertTrue([0.5, 0., 0.5] in cbm_eqs)
self.assertTrue([0., 0.5, 0.5] in cbm_eqs)
self.assertTrue([0.5, 0.5, 0.] in cbm_eqs)
vbm_eqs = bs.get_sym_eq_kpoints(vbm_k)
self.assertTrue([0., 0., 0.] in vbm_eqs)
def test_as_dict(self):
s = json.dumps(self.bs.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs2.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs_spin.as_dict())
self.assertIsNotNone(s)
def test_old_format_load(self):
with open(os.path.join(test_dir, "bs_ZnS_old.json"),
"r", encoding='utf-8') as f:
d = json.load(f)
bs_old = BandStructureSymmLine.from_dict(d)
self.assertEqual(bs_old.get_projection_on_elements()[
Spin.up][0][0]['Zn'], 0.0971)
class ReconstructBandStructureTest(PymatgenTest):
def setUp(self):
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_cu2 = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_reconstruct_band_structure(self):
bs = get_reconstructed_band_structure([self.bs_cu, self.bs_cu2])
self.assertEqual(bs.bands[Spin.up].shape, (20, 700), "wrong number of bands or kpoints")
def test_vasprun_bs(self):
bsv = BSVasprun(os.path.join(test_dir, "vasprun.xml"),
parse_projected_eigen=True,
parse_potcar_file=True)
bs = bsv.get_band_structure(kpoints_filename=os.path.join(test_dir, "KPOINTS.band"),
line_mode=True)
bs.get_projection_on_elements()
class LobsterBandStructureSymmLine_test(PymatgenTest):
def setUp(self):
warnings.simplefilter("ignore")
with open(os.path.join(test_dir, "cohp/Fatband_SiO2/Test_p/lobster_band_structure_spin.json"), 'r') as f:
bs_spin_dict = json.load(f)
self.bs_spin = LobsterBandStructureSymmLine.from_dict(bs_spin_dict)
with open(os.path.join(test_dir, "cohp/Fatband_SiO2/Test_p/lobster_band_structure.json"), 'r') as f:
bs_dict = json.load(f)
self.bs_p = LobsterBandStructureSymmLine.from_dict(bs_dict)
def tearDown(self):
warnings.simplefilter("default")
def test_basic(self):
bs_p = self.bs_p
bs_spin = self.bs_spin
self.assertAlmostEqual(bs_p.structure[0].frac_coords[0], 0.)
self.assertAlmostEqual(bs_p.structure[0].frac_coords[1], 0.47634315)
self.assertAlmostEqual(bs_p.structure[0].frac_coords[2], 0.666667)
self.assertEqual(bs_p.structure[0].species_string, 'Si')
self.assertAlmostEqual(bs_p.structure[0].coords[0], -1.19607309)
self.assertAlmostEqual(bs_p.structure[0].coords[1], 2.0716597)
self.assertAlmostEqual(bs_p.structure[0].coords[2], 3.67462144)
self.assertAlmostEqual(bs_p.efermi, 1.06470288)
lattice = bs_p.lattice_rec.as_dict()
self.assertAlmostEqual(lattice["matrix"][0][0], 1.2511575194890285)
self.assertAlmostEqual(lattice["matrix"][0][1], 0.7223560132915973)
self.assertAlmostEqual(lattice["matrix"][0][2], 0.0)
self.assertAlmostEqual(lattice["matrix"][1][0], 0.0)
self.assertAlmostEqual(lattice["matrix"][1][1], 1.4447123171425553)
self.assertAlmostEqual(lattice["matrix"][1][2], 0.0)
self.assertAlmostEqual(lattice["matrix"][2][0], 0.0)
self.assertAlmostEqual(lattice["matrix"][2][1], 0.0)
self.assertAlmostEqual(lattice["matrix"][2][2], 1.1399248502312707)
self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[0], 0.09090909)
self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[1], 0.0)
self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[2], 0.0)
self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[0], 0.11374159)
self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[1], 0.06566873)
self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[2], 0.)
self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[0], 0.46153846)
self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[1], 0.07692308)
self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[2], 0.0)
self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[0], 0.57745732)
self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[1], 0.4445268)
self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[2], 0.0)
self.assertAlmostEqual(bs_p.distance[30], 0.49251552363382556)
self.assertTrue(bs_p.branches[0]["name"], '\\Gamma-K')
self.assertAlmostEqual(bs_p.get_band_gap()["energy"], 5.6739999999999995)
self.assertAlmostEqual(bs_p.get_projection_on_elements()[Spin.up][0][0]["Si"], 3 * (0.001 + 0.064))
self.assertAlmostEqual(bs_p.get_projections_on_elements_and_orbitals({"Si": ["3p"]})[Spin.up][0][0]["Si"]["3p"],
0.003)
self.assertAlmostEqual(bs_p.get_projections_on_elements_and_orbitals({"O": ["2p"]})[Spin.up][0][0]["O"]["2p"],
0.002 * 3 + 0.003 * 3)
dict_here = bs_p.get_projections_on_elements_and_orbitals({"Si": ["3s", "3p"], "O": ["2s", "2p"]})[Spin.up][0][
0]
self.assertAlmostEqual(dict_here["Si"]["3s"], 0.192)
self.assertAlmostEqual(dict_here["Si"]["3p"], 0.003)
self.assertAlmostEqual(dict_here["O"]["2s"], 0.792)
self.assertAlmostEqual(dict_here["O"]["2p"], 0.015)
self.assertAlmostEqual(bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si"], 3 * (0.001 + 0.064))
self.assertAlmostEqual(
bs_spin.get_projections_on_elements_and_orbitals({"Si": ["3p"]})[Spin.up][0][0]["Si"]["3p"],
0.003)
self.assertAlmostEqual(
bs_spin.get_projections_on_elements_and_orbitals({"O": ["2p"]})[Spin.up][0][0]["O"]["2p"],
0.002 * 3 + 0.003 * 3)
dict_here = \
bs_spin.get_projections_on_elements_and_orbitals({"Si": ["3s", "3p"], "O": ["2s", "2p"]})[Spin.up][0][0]
self.assertAlmostEqual(dict_here["Si"]["3s"], 0.192)
self.assertAlmostEqual(dict_here["Si"]["3p"], 0.003)
self.assertAlmostEqual(dict_here["O"]["2s"], 0.792)
self.assertAlmostEqual(dict_here["O"]["2p"], 0.015)
self.assertAlmostEqual(bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si"], 3 * (0.001 + 0.064))
self.assertAlmostEqual(
bs_spin.get_projections_on_elements_and_orbitals({"Si": ["3p"]})[Spin.down][0][0]["Si"]["3p"],
0.003)
self.assertAlmostEqual(
bs_spin.get_projections_on_elements_and_orbitals({"O": ["2p"]})[Spin.down][0][0]["O"]["2p"],
0.002 * 3 + 0.003 * 3)
dict_here = \
bs_spin.get_projections_on_elements_and_orbitals({"Si": ["3s", "3p"], "O": ["2s", "2p"]})[Spin.down][0][0]
self.assertAlmostEqual(dict_here["Si"]["3s"], 0.192)
self.assertAlmostEqual(dict_here["Si"]["3p"], 0.003)
self.assertAlmostEqual(dict_here["O"]["2s"], 0.792)
self.assertAlmostEqual(dict_here["O"]["2p"], 0.015)
def test_get_branch(self):
branch = self.bs_p.get_branch(0)[0]
self.assertEqual(branch["name"], '\\Gamma-K')
self.assertEqual(branch["start_index"], 0)
self.assertEqual(branch["end_index"], 70)
self.assertEqual(branch["index"], 0)
def test_get_direct_band_gap_dict(self):
direct_dict = self.bs_p.get_direct_band_gap_dict()
self.assertAlmostEqual(direct_dict[Spin.up]["value"], 6.005999999999999)
self.assertEqual(direct_dict[Spin.up]["kpoint_index"], 0)
self.assertListEqual(direct_dict[Spin.up]["band_indices"], [22, 24])
direct_dict = self.bs_spin.get_direct_band_gap_dict()
self.assertAlmostEqual(direct_dict[Spin.up]["value"], 6.005999999999999)
self.assertEqual(direct_dict[Spin.up]["kpoint_index"], 0)
self.assertListEqual(direct_dict[Spin.up]["band_indices"], [22, 24])
self.assertAlmostEqual(direct_dict[Spin.down]["value"], 6.005999999999999)
self.assertEqual(direct_dict[Spin.down]["kpoint_index"], 0)
self.assertListEqual(direct_dict[Spin.down]["band_indices"], [22, 24])
def test_get_direct_band_gap(self):
self.assertAlmostEqual(self.bs_p.get_direct_band_gap(), 6.005999999999999)
self.assertAlmostEqual(self.bs_spin.get_direct_band_gap(), 6.005999999999999)
def test_is_metal(self):
self.assertFalse(self.bs_p.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs_p.get_cbm()
        self.assertAlmostEqual(cbm['energy'], 6.3037028799999995, msg="wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 24, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
        self.assertAlmostEqual(cbm_spin['energy'], 6.30370274, msg="wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 24, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 1, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs_p.get_vbm()
        self.assertAlmostEqual(vbm['energy'], 0.62970288, msg="wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 1, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 23, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 68, "wrong VBM kpoint index")
        self.assertAlmostEqual(vbm['kpoint'].frac_coords[0], 0.34615384615385, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm['kpoint'].frac_coords[1], 0.30769230769231, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm['kpoint'].frac_coords[2], 0.0, msg="wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].label, None, "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
        self.assertAlmostEqual(vbm_spin['energy'], 0.6297027399999999, msg="wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 1, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 1, "wrong VBM number of bands")
self.assertEqual(vbm_spin['band_index'][Spin.up][0], 23, "wrong VBM band index")
self.assertEqual(vbm_spin['kpoint_index'][0], 68, "wrong VBM kpoint index")
        self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[0], 0.34615384615385, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[1], 0.30769230769231, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[2], 0.0, msg="wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].label, None, "wrong VBM kpoint label")
def test_get_band_gap(self):
bg = self.bs_p.get_band_gap()
        self.assertAlmostEqual(bg['energy'], 5.6739999999999995, msg="wrong gap energy")
self.assertEqual(bg['transition'], "(0.346,0.308,0.000)-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg['direct'], "wrong nature of the gap")
bg_spin = self.bs_spin.get_band_gap()
        self.assertAlmostEqual(bg_spin['energy'], 5.674, msg="wrong gap energy")
self.assertEqual(bg_spin['transition'], "(0.346,0.308,0.000)-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
def test_get_sym_eq_kpoints_and_degeneracy(self):
bs = self.bs_p
cbm_k = bs.get_cbm()['kpoint'].frac_coords
vbm_k = bs.get_vbm()['kpoint'].frac_coords
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 1)
self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 3)
def test_as_dict(self):
s = json.dumps(self.bs_p.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs_spin.as_dict())
self.assertIsNotNone(s)
def test_old_format_load(self):
# this method will use the loading from the old dict
self.bs_spin.apply_scissor(3.0)
if __name__ == '__main__':
unittest.main()
| dongsenfo/pymatgen | pymatgen/electronic_structure/tests/test_bandstructure.py | Python | mit | 23,174 |
"""
SPLIF Fingerprints for molecular complexes.
"""
import logging
import itertools
import numpy as np
from deepchem.utils.hash_utils import hash_ecfp_pair
from deepchem.utils.rdkit_utils import load_complex
from deepchem.utils.rdkit_utils import compute_all_ecfp
from deepchem.utils.rdkit_utils import MoleculeLoadException
from deepchem.utils.rdkit_utils import compute_contact_centroid
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.hash_utils import vectorize
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.voxel_utils import convert_atom_pair_to_voxel
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import subtract_centroid
from typing import Tuple, Dict, List
logger = logging.getLogger(__name__)
SPLIF_CONTACT_BINS = [(0, 2.0), (2.0, 3.0), (3.0, 4.5)]
def compute_splif_features_in_range(frag1: Tuple,
frag2: Tuple,
pairwise_distances: np.ndarray,
contact_bin: List,
ecfp_degree: int = 2) -> Dict:
"""Computes SPLIF features for close atoms in molecular complexes.
Finds all frag1 atoms that are > contact_bin[0] and <
contact_bin[1] away from frag2 atoms. Then, finds the ECFP
fingerprints for the contacting atoms. Returns a dictionary
mapping (frag1_index_i, frag2_index_j) --> (frag1_ecfp_i,
frag2_ecfp_j)
Parameters
----------
frag1: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
frag2: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
  contact_bin: List
    (min_dist, max_dist) range; only atom pairs whose distance falls in
    this range are considered.
pairwise_distances: np.ndarray
Array of pairwise fragment-fragment distances (Angstroms)
ecfp_degree: int
ECFP radius
"""
contacts = np.nonzero((pairwise_distances > contact_bin[0]) &
(pairwise_distances < contact_bin[1]))
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
frag1_ecfp_dict = compute_all_ecfp(
frag1[1], indices=frag1_atoms, degree=ecfp_degree)
frag2_ecfp_dict = compute_all_ecfp(frag2[1], degree=ecfp_degree)
splif_dict = {
contact: (frag1_ecfp_dict[contact[0]], frag2_ecfp_dict[contact[1]])
for contact in zip(contacts[0], contacts[1])
}
return splif_dict
def featurize_splif(frag1, frag2, contact_bins, pairwise_distances,
ecfp_degree):
"""Computes SPLIF featurization of fragment interactions binding pocket.
For each contact range (i.e. 1 A to 2 A, 2 A to 3 A, etc.)
compute a dictionary mapping (frag1_index_i, frag2_index_j)
tuples --> (frag1_ecfp_i, frag2_ecfp_j) tuples. Return a
list of such splif dictionaries.
Parameters
----------
frag1: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
frag2: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
contact_bins: np.ndarray
Ranges of pair distances which are placed in separate bins.
pairwise_distances: np.ndarray
Array of pairwise fragment-fragment distances (Angstroms)
ecfp_degree: int
ECFP radius, the graph distance at which fragments are computed.
Returns
-------
Dictionaries of SPLIF interactions suitable for `vectorize` or
`voxelize`.
"""
splif_dicts = []
for i, contact_bin in enumerate(contact_bins):
splif_dicts.append(
compute_splif_features_in_range(frag1, frag2, pairwise_distances,
contact_bin, ecfp_degree))
return splif_dicts
class SplifFingerprint(ComplexFeaturizer):
"""Computes SPLIF Fingerprints for a macromolecular complex.
SPLIF fingerprints are based on a technique introduced in the
following paper.
Da, C., and D. Kireev. "Structural protein–ligand interaction
fingerprints (SPLIF) for structure-based virtual screening:
method and benchmark study." Journal of chemical information
and modeling 54.9 (2014): 2555-2561.
  This featurizer is a subclass of `ComplexFeaturizer` and requires
  3D coordinates for a molecular complex. For each ligand
atom, it identifies close pairs of atoms from different molecules.
These atom pairs are expanded to 2D circular fragments and a
fingerprint for the union is turned on in the bit vector. Note that
we slightly generalize the original paper by not requiring the
interacting molecules to be proteins or ligands.
This is conceptually pretty similar to
`ContactCircularFingerprint` but computes ECFP fragments only
for direct contacts instead of the entire contact region.
For a macromolecular complex, returns a vector of shape
`(len(contact_bins)*size,)`
"""
def __init__(self, contact_bins=None, radius=2, size=8):
"""
Parameters
----------
contact_bins: list[tuple]
List of contact bins. If not specified is set to default
`[(0, 2.0), (2.0, 3.0), (3.0, 4.5)]`.
radius : int, optional (default 2)
Fingerprint radius used for circular fingerprints.
size: int, optional (default 8)
Length of generated bit vector.
"""
if contact_bins is None:
self.contact_bins = SPLIF_CONTACT_BINS
else:
self.contact_bins = contact_bins
self.size = size
self.radius = radius
def _featurize(self, datapoint, **kwargs):
"""
Compute featurization for a molecular complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning("This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
for (frag1, frag2) in itertools.combinations(fragments, 2):
# Get coordinates
distances = compute_pairwise_distances(frag1[0], frag2[0])
# distances = compute_pairwise_distances(prot_xyz, lig_xyz)
vectors = [
vectorize(hash_ecfp_pair, feature_dict=splif_dict,
size=self.size) for splif_dict in featurize_splif(
frag1, frag2, self.contact_bins, distances, self.radius)
]
pairwise_features += vectors
pairwise_features = np.concatenate(pairwise_features)
return pairwise_features
class SplifVoxelizer(ComplexFeaturizer):
"""Computes SPLIF voxel grid for a macromolecular complex.
SPLIF fingerprints are based on a technique introduced in the
following paper [1]_.
The SPLIF voxelizer localizes local SPLIF descriptors in
space, by assigning features to the voxel in which they
originated. This technique may be useful for downstream
learning methods such as convolutional networks.
Featurizes a macromolecular complex into a tensor of shape
`(voxels_per_edge, voxels_per_edge, voxels_per_edge, size)`
where `voxels_per_edge = int(box_width/voxel_width)`.
References
----------
.. [1] Da, C., and D. Kireev. "Structural protein–ligand interaction
fingerprints (SPLIF) for structure-based virtual screening:
method and benchmark study." Journal of chemical information
and modeling 54.9 (2014): 2555-2561.
"""
def __init__(self,
cutoff: float = 4.5,
contact_bins: List = None,
radius: int = 2,
size: int = 8,
box_width: float = 16.0,
voxel_width: float = 1.0):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
contact_bins: list[tuple]
List of contact bins. If not specified is set to default
`[(0, 2.0), (2.0, 3.0), (3.0, 4.5)]`.
radius : int, optional (default 2)
Fingerprint radius used for circular fingerprints.
size: int, optional (default 8)
Length of generated bit vector.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
"""
self.cutoff = cutoff
if contact_bins is None:
self.contact_bins = SPLIF_CONTACT_BINS
else:
self.contact_bins = contact_bins
self.size = size
self.radius = radius
self.box_width = box_width
self.voxel_width = voxel_width
self.voxels_per_edge = int(self.box_width / self.voxel_width)
def _featurize(self, datapoint, **kwargs):
"""
Compute featurization for a molecular complex
Parameters
----------
datapoint: Tuple[str, str]
Filenames for molecule and protein.
"""
if 'complex' in kwargs:
datapoint = kwargs.get("complex")
raise DeprecationWarning(
'Complex is being phased out as a parameter, please pass "datapoint" instead.'
)
try:
fragments = load_complex(datapoint, add_hydrogens=False)
except MoleculeLoadException:
logger.warning("This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
for (frag1, frag2) in itertools.combinations(fragments, 2):
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
pairwise_features.append(
np.concatenate(
[
voxelize(
convert_atom_pair_to_voxel,
hash_function=hash_ecfp_pair,
coordinates=xyzs,
box_width=self.box_width,
voxel_width=self.voxel_width,
feature_dict=splif_dict,
nb_channel=self.size)
for splif_dict in featurize_splif(
frag1, frag2, self.contact_bins, distances, self.radius)
],
axis=-1))
# Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.
return np.concatenate(pairwise_features, axis=-1)
| deepchem/deepchem | deepchem/feat/complex_featurizers/splif_fingerprints.py | Python | mit | 10,535 |
# -*- coding: utf-8 -*-
from .base import _Base, Base
from .base_collection import BaseCollection
from .habit_groups import HabitGroups
from .habit_groups_collection import HabitGroupsCollection
from .habits import Habits
from .habits_collection import HabitsCollection
from .users import Users
from .users_collection import UsersCollection
from .attempts import Attempts
from .attempts_collection import AttemptsCollection
from .routines import Routines
from .routines_collection import RoutinesCollection
from .attempts_logs import AttemptsLogs
from .attempts_logs_collection import AttemptsLogsCollection
from .root import Root
__all__ = ['Attempts',
'AttemptsCollection',
'AttemptsLogs',
'AttemptsLogsCollection',
'Base',
'BaseCollection',
'HabitGroups',
'HabitGroupsCollection',
'Habits',
'HabitsCollection',
'Root',
'Routines',
'RoutinesCollection',
'Users',
'UsersCollection']
| dnguyen0304/mfit_service | mfit/mfit/resources/__init__.py | Python | mit | 1,041 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/inventory/shared_creature_inventory_6.iff"
result.attribute_template_id = -1
result.stfName("item_n","inventory")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/inventory/shared_creature_inventory_6.py | Python | mit | 450 |
""" FileDialogDelegateQt.py: Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
import os.path
try:
from PyQt5.QtCore import Qt, QT_VERSION_STR
from PyQt5.QtWidgets import QStyledItemDelegate, QFileDialog
except ImportError:
try:
from PyQt4.QtCore import Qt, QT_VERSION_STR
from PyQt4.QtGui import QStyledItemDelegate, QFileDialog
except ImportError:
raise ImportError("FileDialogDelegateQt: Requires PyQt5 or PyQt4.")
__author__ = "Marcel Goldschen-Ohm <[email protected]>"
class FileDialogDelegateQt(QStyledItemDelegate):
""" Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
def __init__(self, parent=None):
QStyledItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
""" Instead of creating an editor, just popup a modal file dialog
and set the model data to the selected file name, if any.
"""
pathToFileName = ""
if QT_VERSION_STR[0] == '4':
pathToFileName = QFileDialog.getOpenFileName(None, "Open")
elif QT_VERSION_STR[0] == '5':
pathToFileName, temp = QFileDialog.getOpenFileName(None, "Open")
pathToFileName = str(pathToFileName) # QString ==> str
if len(pathToFileName):
index.model().setData(index, pathToFileName, Qt.EditRole)
index.model().dataChanged.emit(index, index) # Tell model to update cell display.
return None
def displayText(self, value, locale):
""" Show file name without path.
"""
try:
if QT_VERSION_STR[0] == '4':
pathToFileName = str(value.toString()) # QVariant ==> str
elif QT_VERSION_STR[0] == '5':
pathToFileName = str(value)
path, fileName = os.path.split(pathToFileName)
return fileName
except:
return ""
| marcel-goldschen-ohm/ModelViewPyQt | FileDialogDelegateQt.py | Python | mit | 2,023 |
import sys
import pprint
class Reference(object):
def __init__(self, tb_index, varname, target):
self.tb_index = tb_index
self.varname = varname
self.target = target
def marker(self, xtb, tb_index, key):
return Marker(self, xtb, tb_index, key)
class Marker(object):
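    """Stand-in emitted while formatting a traceback variable.
    Its repr either pretty-prints the referenced object or, when the
    output would be too wide or span multiple lines, renders a compact
    `<ref offset=...>` marker pointing back to the original variable.
    """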
def __init__(self, reference, xtb, tb_index, key):
self.reference = reference
self.xtb = xtb
self.tb_index = tb_index
self.key = key
self.tb_offset = self.reference.tb_index - self.tb_index
def __repr__(self):
frame = sys._getframe(1)
while frame:
try:
code = self.xtb._format_variable.func_code
except AttributeError:
# python 3
code = self.xtb._format_variable.__code__
if frame.f_code == code:
indent = frame.f_locals["indent"] + 4
break
frame = frame.f_back
else: # pragma: no cover - defensive
raise RuntimeError("Expecting to be called with "
"XTraceback._format_variable in stack")
pretty_repr = pprint.pformat(self.reference.target)
if indent + len(self.key) + len(pretty_repr) > self.xtb.print_width \
or pretty_repr.find("\n") > 0:
name = "" if self.reference.varname == self.key \
else " name=%s" % self.reference.varname
pretty_repr = "<ref offset=%d%s>" % (self.tb_offset, name)
return pretty_repr
| Hypernode/xtraceback | xtraceback/reference.py | Python | mit | 1,552 |
# -*- coding: utf-8 -*-
"""
This scripts sets an initial layout for the ProEMOnline software. It uses the
PyQtGraph dockarea system and was designed from the dockarea.py example.
Contains:
Left column: Observing Log
Center column: Plots
Right column: Images and Process Log
Menu bar
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.console
import numpy as np
import math
import astropy.io.fits as fits
from pyqtgraph.dockarea import *
#This program operates in four stages.
#Stage 0 - Program Initialized, waiting to open SPE file.
#Stage 1 - SPE file open, stars are being selected
#Stage 2 - Online data reduction and aperture photometry/plotting is done.
#Stage 3 - End of data acquisition detected. Final data written to file. Timestamps verified. Log saved. Weather/time log data saved.
# -> revert back to Stage 0.
stage=0 #start at 0
#Keep track of the current frame:
#One version that we do science on
#One version for display purposes
def newframe(fitsfile):
"""For given filename, return science and display images.
"""
img = fits.getdata(fitsfile)[0]
displayimg = np.copy(img)
#replace everything above 99%tile
    #don't do calculations on this adjusted array!!!
imgvals = displayimg.flatten()
img99percentile = np.percentile(imgvals,99)
displayimg[displayimg > img99percentile] = img99percentile
#make color
displayimg=np.array([displayimg,displayimg,displayimg]).transpose()
return img,displayimg
#Start with some initial example file
fitsfile = 'ProEMExample.fits' #initial file
img,displayimg = newframe(fitsfile)
#Use a function to display a new image
#Autoscaling levels optional
def displayframe(displayimg,autoscale=False):
"""Display an RBG image
Autoscale optional.
Return nothing.
"""
if autoscale:
w5.setImage(displayimg,autoRange=True,levels=[np.min(displayimg),np.max(displayimg)-1])
else:
w5.setImage(displayimg,autoRange=False,autoLevels=False)
#Set up a list to keep track of star positions
starpos=[]
#Open File functionality
class WithMenu(QtGui.QMainWindow):
def __init__(self):
super(WithMenu, self).__init__()
self.initUI()
def initUI(self):
#Note: Exit is protected on Mac. This may work on Windows.
exitAction = QtGui.QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.showDialog)
openFile = QtGui.QAction('Open', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open new File')
openFile.triggered.connect(self.showDialog)
menubar = self.menuBar()
fileMenu = menubar.addMenu('File')
fileMenu.addAction(openFile)
fileMenu.addAction(exitAction)
def showDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
'/home')
#print str(fname)
img = fits.getdata(str(fname))[0]
w5.setImage(img)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"Really quit?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
app = QtGui.QApplication([])
win = WithMenu()
area = DockArea()
win.setCentralWidget(area)
win.resize(1200,600)
win.setWindowTitle('ProEM Online Data Analysis Demo')
## Create docks, place them into the window one at a time.
## Note that size arguments are only a suggestion; docks will still have to
## fill the entire dock area and obey the limits of their internal widgets.
d1 = Dock("Dock1 - Observing Log", size=(500,300))
d2 = Dock("Dock2 - Process Log", size=(500,300))
d3 = Dock("Dock3 - Fourier Transform", size=(500,400))
d4 = Dock("Dock4 (tabbed) - Smoothed", size=(500,200))
d5 = Dock("Dock5 - Image", size=(500,200))
d6 = Dock("Dock6 (tabbed) - Light Curve", size=(500,200))
d7 = Dock("Dock7 (tabbed) - Comparison Counts", size=(500,200))
d8 = Dock("Dock8 (tabbed) - Seeing", size=(500,200))
area.addDock(d1, 'left') ## place d1 at left edge of dock area (it will fill the whole space since there are no other docks yet)
area.addDock(d2, 'right') ## place d2 at right edge of dock area
area.addDock(d3, 'left', d2)## place d3 at the left edge of d2
area.addDock(d4, 'top',d3) ## place d4 on top d3
area.addDock(d5, 'top',d2) ## place d5 on top d2
area.addDock(d6, 'above', d4) ## place d6 above d4
area.addDock(d7, 'top', d3)
area.addDock(d8, 'above', d7)
## Add widgets into each dock
## First dock holds the Observing Log
w1 = pg.LayoutWidget()
observer = QtGui.QLabel('Observer')
target = QtGui.QLabel('Target')
filt = QtGui.QLabel('Filter')
log = QtGui.QLabel('Log')
observerEdit = QtGui.QLineEdit()
targetEdit = QtGui.QLineEdit()
filtEdit = QtGui.QComboBox()
filtEdit.addItems(["BG40","u'","g'","r'","i'","z'","Other"])
logEdit = QtGui.QTextEdit()
w1.addWidget(observer, 1, 0)
w1.addWidget(observerEdit, 1, 1)
w1.addWidget(target, 2, 0)
w1.addWidget(targetEdit, 2, 1)
w1.addWidget(filt, 3, 0)
w1.addWidget(filtEdit, 3, 1)
w1.addWidget(log, 4, 0)
w1.addWidget(logEdit, 4, 1, 6, 1)
d1.addWidget(w1)
## Process Log
w2 = pg.LayoutWidget()
processLog = QtGui.QTextEdit()
processLog.setReadOnly(True)
#processLog.setTextBackgroundColor(QtGui.QColor("black"))
w2.addWidget(processLog, 0, 0, 6, 1)
d2.addWidget(w2)
## Fourier Transform - Just shows random updating noise for now
w3 = pg.PlotWidget(title="Fourier Transform")
curve = w3.plot(pen='y')
data = np.random.normal(size=(10,1000))
ptr = 0
def update():
global curve, data, ptr, w3
curve.setData(data[ptr%10])
if ptr == 0:
w3.enableAutoRange('xy', False) ## stop auto-scaling after the first data set is plotted
ptr += 1
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
d3.addWidget(w3)
## Smoothed Light Curve
w4 = pg.PlotWidget(title="Dock 4 plot")
w4.plot(np.random.normal(size=100))
d4.addWidget(w4)
## Image
w5 = pg.ImageView()
w5.ui.roiBtn.hide()
w5.ui.normBtn.hide()
displayframe(displayimg,autoscale=True)
def click(event):
event.accept()
pos = event.pos()
#check if we're marking or unmarking a star
#if pos.
starpos.append([pos.x(),pos.y()])
#img[pos.x(),pos.y()]=[255,255-img[pos.x(),pos.y(),1],255-img[pos.x(),pos.y(),1]]
#w5.setImage(img,autoRange=False)
processLog.append("Star selected at "+str( (int(pos.x()),int(pos.y())) ))
w5.getImageItem().mouseClickEvent = click
d5.addWidget(w5)
## Light Curve
w6 = pg.PlotWidget(title="Dock 6 plot")
w6.plot(np.random.normal(size=100))
d6.addWidget(w6)
## Smoothed Light Curve
w7 = pg.PlotWidget(title="Dock 7 plot")
w7.plot(np.random.normal(size=100))
d7.addWidget(w7)
## Smoothed Light Curve
w8 = pg.PlotWidget(title="Dock 8 plot")
w8.plot(np.random.normal(size=100))
d8.addWidget(w8)
win.show()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| ccd-utexas/ProEMOnline | layout.py | Python | mit | 7,341 |
import _plotly_utils.basevalidators
class PadValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="pad", parent_name="layout.title", **kwargs):
super(PadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Pad"),
data_docs=kwargs.pop(
"data_docs",
"""
b
The amount of padding (in px) along the bottom
of the component.
l
The amount of padding (in px) on the left side
of the component.
r
The amount of padding (in px) on the right side
of the component.
t
The amount of padding (in px) along the top of
the component.
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/title/_pad.py | Python | mit | 945 |
from sentence import Sentence
from textblob import TextBlob
from itertools import chain
from collections import Counter
def findSubject(lines):
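    """Return the most common noun found across the given lines of text.
    The noun is returned with spaces replaced by underscores, and only
    if it occurs at least twice; otherwise None is returned.
    """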
sentences = []
if len(lines) == 0:
print "messages are empty"
return None
for m in lines:
sentences.append(Sentence(m).nouns)
if len(sentences) != 0:
maxNoun = Counter(list(chain(*sentences))).most_common()[0]
else:
print "No nouns"
return None
if maxNoun[1] >= 2:
return maxNoun[0].replace(" ","_")
else:
return None
| ZacharyJacobCollins/Fallen | tagger.py | Python | mit | 554 |
import xml.sax
import unittest
import test_utils
import xmlreader
import os
path = os.path.dirname(os.path.abspath(__file__) )
class XmlReaderTestCase(unittest.TestCase):
def test_XmlDumpAllRevs(self):
pages = [r for r in xmlreader.XmlDump(path + "/data/article-pear.xml", allrevisions=True).parse()]
self.assertEquals(4, len(pages))
self.assertEquals(u"Automated conversion", pages[0].comment)
self.assertEquals(u"Pear", pages[0].title)
self.assertEquals(u"24278", pages[0].id)
self.assertTrue(pages[0].text.startswith('Pears are [[tree]]s of'))
self.assertEquals(u"Quercusrobur", pages[1].username)
self.assertEquals(u"Pear", pages[0].title)
def test_XmlDumpFirstRev(self):
pages = [r for r in xmlreader.XmlDump(path + "/data/article-pear.xml").parse()]
self.assertEquals(1, len(pages))
self.assertEquals(u"Automated conversion", pages[0].comment)
self.assertEquals(u"Pear", pages[0].title)
self.assertEquals(u"24278", pages[0].id)
self.assertTrue(pages[0].text.startswith('Pears are [[tree]]s of'))
self.assertTrue(not pages[0].isredirect)
def test_XmlDumpRedirect(self):
pages = [r for r in xmlreader.XmlDump(path + "/data/article-pyrus.xml").parse()]
self.assertTrue(pages[0].isredirect)
def test_MediaWikiXmlHandler(self):
handler = xmlreader.MediaWikiXmlHandler()
pages = []
def pageDone(page):
pages.append(page)
handler.setCallback(pageDone)
xml.sax.parse(path + "/data/article-pear.xml", handler)
self.assertEquals(u"Pear", pages[0].title)
self.assertEquals(4, len(pages))
self.assertNotEquals("", pages[0].comment)
if __name__ == '__main__':
unittest.main()
| races1986/SafeLanguage | CEM/tests/test_xmlreader.py | Python | epl-1.0 | 1,809 |
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Burnin program
"""
import sys
import optparse
import time
import socket
import urllib
from itertools import izip, islice, cycle
from cStringIO import StringIO
from ganeti import opcodes
from ganeti import constants
from ganeti import cli
from ganeti import errors
from ganeti import utils
from ganeti import hypervisor
from ganeti import compat
from ganeti import pathutils
from ganeti.confd import client as confd_client
USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...")
MAX_RETRIES = 3
LOG_HEADERS = {
0: "- ",
1: "* ",
2: "",
}
#: Disk templates supporting a single node
_SINGLE_NODE_DISK_TEMPLATES = compat.UniqueFrozenset([
constants.DT_DISKLESS,
constants.DT_PLAIN,
constants.DT_FILE,
constants.DT_SHARED_FILE,
constants.DT_EXT,
constants.DT_RBD,
])
_SUPPORTED_DISK_TEMPLATES = compat.UniqueFrozenset([
constants.DT_DISKLESS,
constants.DT_DRBD8,
constants.DT_EXT,
constants.DT_FILE,
constants.DT_PLAIN,
constants.DT_RBD,
constants.DT_SHARED_FILE,
])
#: Disk templates for which import/export is tested
_IMPEXP_DISK_TEMPLATES = (_SUPPORTED_DISK_TEMPLATES - frozenset([
constants.DT_DISKLESS,
constants.DT_FILE,
constants.DT_SHARED_FILE,
]))
class InstanceDown(Exception):
"""The checked instance was not up"""
class BurninFailure(Exception):
"""Failure detected during burning"""
def Usage():
"""Shows program usage information and exits the program."""
print >> sys.stderr, "Usage:"
print >> sys.stderr, USAGE
sys.exit(2)
def Log(msg, *args, **kwargs):
"""Simple function that prints out its argument.
"""
if args:
msg = msg % args
indent = kwargs.get("indent", 0)
sys.stdout.write("%*s%s%s\n" % (2 * indent, "",
LOG_HEADERS.get(indent, " "), msg))
sys.stdout.flush()
def Err(msg, exit_code=1):
"""Simple error logging that prints to stderr.
"""
sys.stderr.write(msg + "\n")
sys.stderr.flush()
sys.exit(exit_code)
class SimpleOpener(urllib.FancyURLopener):
"""A simple url opener"""
# pylint: disable=W0221
def prompt_user_passwd(self, host, realm, clear_cache=0):
"""No-interaction version of prompt_user_passwd."""
# we follow parent class' API
# pylint: disable=W0613
return None, None
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Custom error handling"""
# make sure sockets are not left in CLOSE_WAIT, this is similar
# but with a different exception to the BasicURLOpener class
_ = fp.read() # throw away data
fp.close()
raise InstanceDown("HTTP error returned: code %s, msg %s" %
(errcode, errmsg))
OPTIONS = [
cli.cli_option("-o", "--os", dest="os", default=None,
help="OS to use during burnin",
metavar="<OS>",
completion_suggest=cli.OPT_COMPL_ONE_OS),
cli.HYPERVISOR_OPT,
cli.OSPARAMS_OPT,
cli.cli_option("--disk-size", dest="disk_size",
help="Disk size (determines disk count)",
default="128m", type="string", metavar="<size,size,...>",
completion_suggest=("128M 512M 1G 4G 1G,256M"
" 4G,1G,1G 10G").split()),
cli.cli_option("--disk-growth", dest="disk_growth", help="Disk growth",
default="128m", type="string", metavar="<size,size,...>"),
cli.cli_option("--mem-size", dest="mem_size", help="Memory size",
default=None, type="unit", metavar="<size>",
completion_suggest=("128M 256M 512M 1G 4G 8G"
" 12G 16G").split()),
cli.cli_option("--maxmem-size", dest="maxmem_size", help="Max Memory size",
default=256, type="unit", metavar="<size>",
completion_suggest=("128M 256M 512M 1G 4G 8G"
" 12G 16G").split()),
cli.cli_option("--minmem-size", dest="minmem_size", help="Min Memory size",
default=128, type="unit", metavar="<size>",
completion_suggest=("128M 256M 512M 1G 4G 8G"
" 12G 16G").split()),
cli.cli_option("--vcpu-count", dest="vcpu_count", help="VCPU count",
default=3, type="unit", metavar="<count>",
completion_suggest=("1 2 3 4").split()),
cli.DEBUG_OPT,
cli.VERBOSE_OPT,
cli.NOIPCHECK_OPT,
cli.NONAMECHECK_OPT,
cli.EARLY_RELEASE_OPT,
cli.cli_option("--no-replace1", dest="do_replace1",
help="Skip disk replacement with the same secondary",
action="store_false", default=True),
cli.cli_option("--no-replace2", dest="do_replace2",
help="Skip disk replacement with a different secondary",
action="store_false", default=True),
cli.cli_option("--no-failover", dest="do_failover",
help="Skip instance failovers", action="store_false",
default=True),
cli.cli_option("--no-migrate", dest="do_migrate",
help="Skip instance live migration",
action="store_false", default=True),
cli.cli_option("--no-move", dest="do_move",
help="Skip instance moves", action="store_false",
default=True),
cli.cli_option("--no-importexport", dest="do_importexport",
help="Skip instance export/import", action="store_false",
default=True),
cli.cli_option("--no-startstop", dest="do_startstop",
help="Skip instance stop/start", action="store_false",
default=True),
cli.cli_option("--no-reinstall", dest="do_reinstall",
help="Skip instance reinstall", action="store_false",
default=True),
cli.cli_option("--no-reboot", dest="do_reboot",
help="Skip instance reboot", action="store_false",
default=True),
cli.cli_option("--no-renamesame", dest="do_renamesame",
help="Skip instance rename to same name", action="store_false",
default=True),
cli.cli_option("--reboot-types", dest="reboot_types",
help="Specify the reboot types", default=None),
cli.cli_option("--no-activate-disks", dest="do_activate_disks",
help="Skip disk activation/deactivation",
action="store_false", default=True),
cli.cli_option("--no-add-disks", dest="do_addremove_disks",
help="Skip disk addition/removal",
action="store_false", default=True),
cli.cli_option("--no-add-nics", dest="do_addremove_nics",
help="Skip NIC addition/removal",
action="store_false", default=True),
cli.cli_option("--no-nics", dest="nics",
help="No network interfaces", action="store_const",
const=[], default=[{}]),
cli.cli_option("--no-confd", dest="do_confd_tests",
help="Skip confd queries",
action="store_false", default=constants.ENABLE_CONFD),
cli.cli_option("--rename", dest="rename", default=None,
help=("Give one unused instance name which is taken"
" to start the renaming sequence"),
metavar="<instance_name>"),
cli.cli_option("-t", "--disk-template", dest="disk_template",
choices=list(_SUPPORTED_DISK_TEMPLATES),
default=constants.DT_DRBD8,
help=("Disk template (default %s, otherwise one of %s)" %
(constants.DT_DRBD8,
utils.CommaJoin(_SUPPORTED_DISK_TEMPLATES)))),
cli.cli_option("-n", "--nodes", dest="nodes", default="",
help=("Comma separated list of nodes to perform"
" the burnin on (defaults to all nodes)"),
completion_suggest=cli.OPT_COMPL_MANY_NODES),
cli.cli_option("-I", "--iallocator", dest="iallocator",
default=None, type="string",
help=("Perform the allocation using an iallocator"
" instead of fixed node spread (node restrictions no"
" longer apply, therefore -n/--nodes must not be"
" used"),
completion_suggest=cli.OPT_COMPL_ONE_IALLOCATOR),
cli.cli_option("-p", "--parallel", default=False, action="store_true",
dest="parallel",
help=("Enable parallelization of some operations in"
" order to speed burnin or to test granular locking")),
cli.cli_option("--net-timeout", default=15, type="int",
dest="net_timeout",
help=("The instance check network timeout in seconds"
" (defaults to 15 seconds)"),
completion_suggest="15 60 300 900".split()),
cli.cli_option("-C", "--http-check", default=False, action="store_true",
dest="http_check",
help=("Enable checking of instance status via http,"
" looking for /hostname.txt that should contain the"
" name of the instance")),
cli.cli_option("-K", "--keep-instances", default=False,
action="store_true",
dest="keep_instances",
help=("Leave instances on the cluster after burnin,"
" for investigation in case of errors or simply"
" to use them")),
cli.REASON_OPT,
]
# Mainly used for bash completion
ARGUMENTS = [cli.ArgInstance(min=1)]
def _DoCheckInstances(fn):
"""Decorator for checking instances.
"""
def wrapper(self, *args, **kwargs):
val = fn(self, *args, **kwargs)
for instance in self.instances:
self._CheckInstanceAlive(instance) # pylint: disable=W0212
return val
return wrapper
def _DoBatch(retry):
"""Decorator for possible batch operations.
Must come after the _DoCheckInstances decorator (if any).
@param retry: whether this is a retryable batch, will be
passed to StartBatch
"""
def wrap(fn):
def batched(self, *args, **kwargs):
self.StartBatch(retry)
val = fn(self, *args, **kwargs)
self.CommitQueue()
return val
return batched
return wrap
class Burner(object):
"""Burner class."""
def __init__(self):
"""Constructor."""
self.url_opener = SimpleOpener()
self._feed_buf = StringIO()
self.nodes = []
self.instances = []
self.to_rem = []
self.queued_ops = []
self.opts = None
self.queue_retry = False
self.disk_count = self.disk_growth = self.disk_size = None
self.hvp = self.bep = None
self.ParseOptions()
self.cl = cli.GetClient()
self.GetState()
def ClearFeedbackBuf(self):
"""Clear the feedback buffer."""
self._feed_buf.truncate(0)
def GetFeedbackBuf(self):
"""Return the contents of the buffer."""
return self._feed_buf.getvalue()
def Feedback(self, msg):
"""Acumulate feedback in our buffer."""
formatted_msg = "%s %s" % (time.ctime(utils.MergeTime(msg[0])), msg[2])
self._feed_buf.write(formatted_msg + "\n")
if self.opts.verbose:
Log(formatted_msg, indent=3)
def MaybeRetry(self, retry_count, msg, fn, *args):
"""Possibly retry a given function execution.
@type retry_count: int
@param retry_count: retry counter:
- 0: non-retryable action
- 1: last retry for a retryable action
- MAX_RETRIES: original try for a retryable action
@type msg: str
@param msg: the kind of the operation
@type fn: callable
@param fn: the function to be called
"""
try:
val = fn(*args)
if retry_count > 0 and retry_count < MAX_RETRIES:
Log("Idempotent %s succeeded after %d retries",
msg, MAX_RETRIES - retry_count)
return val
except Exception, err: # pylint: disable=W0703
if retry_count == 0:
Log("Non-idempotent %s failed, aborting", msg)
raise
elif retry_count == 1:
Log("Idempotent %s repeated failure, aborting", msg)
raise
else:
Log("Idempotent %s failed, retry #%d/%d: %s",
msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err)
self.MaybeRetry(retry_count - 1, msg, fn, *args)
def _ExecOp(self, *ops):
"""Execute one or more opcodes and manage the exec buffer.
@return: if only opcode has been passed, we return its result;
otherwise we return the list of results
"""
job_id = cli.SendJob(ops, cl=self.cl)
results = cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback)
if len(ops) == 1:
return results[0]
else:
return results
def ExecOp(self, retry, *ops):
"""Execute one or more opcodes and manage the exec buffer.
@return: if only opcode has been passed, we return its result;
otherwise we return the list of results
"""
if retry:
rval = MAX_RETRIES
else:
rval = 0
cli.SetGenericOpcodeOpts(ops, self.opts)
return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)
def ExecOrQueue(self, name, ops, post_process=None):
"""Execute an opcode and manage the exec buffer."""
if self.opts.parallel:
cli.SetGenericOpcodeOpts(ops, self.opts)
self.queued_ops.append((ops, name, post_process))
else:
val = self.ExecOp(self.queue_retry, *ops) # pylint: disable=W0142
if post_process is not None:
post_process()
return val
def StartBatch(self, retry):
"""Start a new batch of jobs.
@param retry: whether this is a retryable batch
"""
self.queued_ops = []
self.queue_retry = retry
def CommitQueue(self):
"""Execute all submitted opcodes in case of parallel burnin"""
if not self.opts.parallel or not self.queued_ops:
return
if self.queue_retry:
rval = MAX_RETRIES
else:
rval = 0
try:
results = self.MaybeRetry(rval, "jobset", self.ExecJobSet,
self.queued_ops)
finally:
self.queued_ops = []
return results
def ExecJobSet(self, jobs):
"""Execute a set of jobs and return once all are done.
The method will return the list of results, if all jobs are
successful. Otherwise, OpExecError will be raised from within
cli.py.
"""
self.ClearFeedbackBuf()
jex = cli.JobExecutor(cl=self.cl, feedback_fn=self.Feedback)
for ops, name, _ in jobs:
jex.QueueJob(name, *ops) # pylint: disable=W0142
try:
results = jex.GetResults()
except Exception, err: # pylint: disable=W0703
Log("Jobs failed: %s", err)
raise BurninFailure()
fail = False
val = []
for (_, name, post_process), (success, result) in zip(jobs, results):
if success:
if post_process:
try:
post_process()
except Exception, err: # pylint: disable=W0703
Log("Post process call for job %s failed: %s", name, err)
fail = True
val.append(result)
else:
fail = True
if fail:
raise BurninFailure()
return val
def ParseOptions(self):
"""Parses the command line options.
In case of command line errors, it will show the usage and exit the
program.
"""
parser = optparse.OptionParser(usage="\n%s" % USAGE,
version=("%%prog (ganeti) %s" %
constants.RELEASE_VERSION),
option_list=OPTIONS)
options, args = parser.parse_args()
if len(args) < 1 or options.os is None:
Usage()
if options.mem_size:
options.maxmem_size = options.mem_size
options.minmem_size = options.mem_size
elif options.minmem_size > options.maxmem_size:
Err("Maximum memory lower than minimum memory")
if options.disk_template not in _SUPPORTED_DISK_TEMPLATES:
Err("Unknown or unsupported disk template '%s'" % options.disk_template)
if options.disk_template == constants.DT_DISKLESS:
disk_size = disk_growth = []
options.do_addremove_disks = False
else:
disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")]
disk_growth = [utils.ParseUnit(v)
for v in options.disk_growth.split(",")]
if len(disk_growth) != len(disk_size):
Err("Wrong disk sizes/growth combination")
if ((disk_size and options.disk_template == constants.DT_DISKLESS) or
(not disk_size and options.disk_template != constants.DT_DISKLESS)):
Err("Wrong disk count/disk template combination")
self.disk_size = disk_size
self.disk_growth = disk_growth
self.disk_count = len(disk_size)
if options.nodes and options.iallocator:
Err("Give either the nodes option or the iallocator option, not both")
if options.http_check and not options.name_check:
Err("Can't enable HTTP checks without name checks")
self.opts = options
self.instances = args
self.bep = {
constants.BE_MINMEM: options.minmem_size,
constants.BE_MAXMEM: options.maxmem_size,
constants.BE_VCPUS: options.vcpu_count,
}
self.hypervisor = None
self.hvp = {}
if options.hypervisor:
self.hypervisor, self.hvp = options.hypervisor
if options.reboot_types is None:
options.reboot_types = constants.REBOOT_TYPES
else:
options.reboot_types = options.reboot_types.split(",")
rt_diff = set(options.reboot_types).difference(constants.REBOOT_TYPES)
if rt_diff:
Err("Invalid reboot types specified: %s" % utils.CommaJoin(rt_diff))
socket.setdefaulttimeout(options.net_timeout)
def GetState(self):
"""Read the cluster state from the master daemon."""
if self.opts.nodes:
names = self.opts.nodes.split(",")
else:
names = []
try:
op = opcodes.OpNodeQuery(output_fields=["name", "offline", "drained"],
names=names, use_locking=True)
result = self.ExecOp(True, op)
except errors.GenericError, err:
err_code, msg = cli.FormatError(err)
Err(msg, exit_code=err_code)
self.nodes = [data[0] for data in result if not (data[1] or data[2])]
op_diagnose = opcodes.OpOsDiagnose(output_fields=["name",
"variants",
"hidden"],
names=[])
result = self.ExecOp(True, op_diagnose)
if not result:
Err("Can't get the OS list")
found = False
for (name, variants, _) in result:
if self.opts.os in cli.CalculateOSNames(name, variants):
found = True
break
if not found:
Err("OS '%s' not found" % self.opts.os)
cluster_info = self.cl.QueryClusterInfo()
self.cluster_info = cluster_info
if not self.cluster_info:
Err("Can't get cluster info")
default_nic_params = self.cluster_info["nicparams"][constants.PP_DEFAULT]
self.cluster_default_nicparams = default_nic_params
if self.hypervisor is None:
self.hypervisor = self.cluster_info["default_hypervisor"]
self.hv_can_migrate = \
hypervisor.GetHypervisorClass(self.hypervisor).CAN_MIGRATE
@_DoCheckInstances
@_DoBatch(False)
def BurnCreateInstances(self):
"""Create the given instances.
"""
self.to_rem = []
mytor = izip(cycle(self.nodes),
islice(cycle(self.nodes), 1, None),
self.instances)
Log("Creating instances")
for pnode, snode, instance in mytor:
Log("instance %s", instance, indent=1)
if self.opts.iallocator:
pnode = snode = None
msg = "with iallocator %s" % self.opts.iallocator
elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
snode = None
msg = "on %s" % pnode
else:
msg = "on %s, %s" % (pnode, snode)
Log(msg, indent=2)
op = opcodes.OpInstanceCreate(instance_name=instance,
disks=[{"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
nics=self.opts.nics,
mode=constants.INSTANCE_CREATE,
os_type=self.opts.os,
pnode=pnode,
snode=snode,
start=True,
ip_check=self.opts.ip_check,
name_check=self.opts.name_check,
wait_for_sync=True,
file_driver="loop",
file_storage_dir=None,
iallocator=self.opts.iallocator,
beparams=self.bep,
hvparams=self.hvp,
hypervisor=self.hypervisor,
osparams=self.opts.osparams,
)
remove_instance = lambda name: lambda: self.to_rem.append(name)
self.ExecOrQueue(instance, [op], post_process=remove_instance(instance))
@_DoBatch(False)
def BurnModifyRuntimeMemory(self):
"""Alter the runtime memory."""
Log("Setting instance runtime memory")
for instance in self.instances:
Log("instance %s", instance, indent=1)
tgt_mem = self.bep[constants.BE_MINMEM]
op = opcodes.OpInstanceSetParams(instance_name=instance,
runtime_mem=tgt_mem)
Log("Set memory to %s MB", tgt_mem, indent=2)
self.ExecOrQueue(instance, [op])
@_DoBatch(False)
def BurnGrowDisks(self):
"""Grow both the os and the swap disks by the requested amount, if any."""
Log("Growing disks")
for instance in self.instances:
Log("instance %s", instance, indent=1)
for idx, growth in enumerate(self.disk_growth):
if growth > 0:
op = opcodes.OpInstanceGrowDisk(instance_name=instance, disk=idx,
amount=growth, wait_for_sync=True)
Log("increase disk/%s by %s MB", idx, growth, indent=2)
self.ExecOrQueue(instance, [op])
@_DoBatch(True)
def BurnReplaceDisks1D8(self):
"""Replace disks on primary and secondary for drbd8."""
Log("Replacing disks on the same nodes")
early_release = self.opts.early_release
for instance in self.instances:
Log("instance %s", instance, indent=1)
ops = []
for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI:
op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
mode=mode,
disks=list(range(self.disk_count)),
early_release=early_release)
Log("run %s", mode, indent=2)
ops.append(op)
self.ExecOrQueue(instance, ops)
@_DoBatch(True)
def BurnReplaceDisks2(self):
"""Replace secondary node."""
Log("Changing the secondary node")
mode = constants.REPLACE_DISK_CHG
mytor = izip(islice(cycle(self.nodes), 2, None),
self.instances)
for tnode, instance in mytor:
Log("instance %s", instance, indent=1)
if self.opts.iallocator:
tnode = None
msg = "with iallocator %s" % self.opts.iallocator
else:
msg = tnode
op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
mode=mode,
remote_node=tnode,
iallocator=self.opts.iallocator,
disks=[],
early_release=self.opts.early_release)
Log("run %s %s", mode, msg, indent=2)
self.ExecOrQueue(instance, [op])
@_DoCheckInstances
@_DoBatch(False)
def BurnFailover(self):
"""Failover the instances."""
Log("Failing over instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op = opcodes.OpInstanceFailover(instance_name=instance,
ignore_consistency=False)
self.ExecOrQueue(instance, [op])
@_DoCheckInstances
@_DoBatch(False)
def BurnMove(self):
"""Move the instances."""
Log("Moving instances")
mytor = izip(islice(cycle(self.nodes), 1, None),
self.instances)
for tnode, instance in mytor:
Log("instance %s", instance, indent=1)
op = opcodes.OpInstanceMove(instance_name=instance,
target_node=tnode)
self.ExecOrQueue(instance, [op])
@_DoBatch(False)
def BurnMigrate(self):
"""Migrate the instances."""
Log("Migrating instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None,
cleanup=False)
op2 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None,
cleanup=True)
Log("migration and migration cleanup", indent=2)
self.ExecOrQueue(instance, [op1, op2])
@_DoCheckInstances
@_DoBatch(False)
def BurnImportExport(self):
"""Export the instance, delete it, and import it back.
"""
Log("Exporting and re-importing instances")
mytor = izip(cycle(self.nodes),
islice(cycle(self.nodes), 1, None),
islice(cycle(self.nodes), 2, None),
self.instances)
for pnode, snode, enode, instance in mytor:
Log("instance %s", instance, indent=1)
# read the full name of the instance
nam_op = opcodes.OpInstanceQuery(output_fields=["name"],
names=[instance], use_locking=True)
full_name = self.ExecOp(False, nam_op)[0][0]
if self.opts.iallocator:
pnode = snode = None
import_log_msg = ("import from %s"
" with iallocator %s" %
(enode, self.opts.iallocator))
elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
snode = None
import_log_msg = ("import from %s to %s" %
(enode, pnode))
else:
import_log_msg = ("import from %s to %s, %s" %
(enode, pnode, snode))
exp_op = opcodes.OpBackupExport(instance_name=instance,
target_node=enode,
mode=constants.EXPORT_MODE_LOCAL,
shutdown=True)
rem_op = opcodes.OpInstanceRemove(instance_name=instance,
ignore_failures=True)
imp_dir = utils.PathJoin(pathutils.EXPORT_DIR, full_name)
imp_op = opcodes.OpInstanceCreate(instance_name=instance,
disks=[{"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
nics=self.opts.nics,
mode=constants.INSTANCE_IMPORT,
src_node=enode,
src_path=imp_dir,
pnode=pnode,
snode=snode,
start=True,
ip_check=self.opts.ip_check,
name_check=self.opts.name_check,
wait_for_sync=True,
file_storage_dir=None,
file_driver="loop",
iallocator=self.opts.iallocator,
beparams=self.bep,
hvparams=self.hvp,
osparams=self.opts.osparams,
)
erem_op = opcodes.OpBackupRemove(instance_name=instance)
Log("export to node %s", enode, indent=2)
Log("remove instance", indent=2)
Log(import_log_msg, indent=2)
Log("remove export", indent=2)
self.ExecOrQueue(instance, [exp_op, rem_op, imp_op, erem_op])
@staticmethod
def StopInstanceOp(instance):
"""Stop given instance."""
return opcodes.OpInstanceShutdown(instance_name=instance)
@staticmethod
def StartInstanceOp(instance):
"""Start given instance."""
return opcodes.OpInstanceStartup(instance_name=instance, force=False)
@staticmethod
def RenameInstanceOp(instance, instance_new):
"""Rename instance."""
return opcodes.OpInstanceRename(instance_name=instance,
new_name=instance_new)
@_DoCheckInstances
@_DoBatch(True)
def BurnStopStart(self):
"""Stop/start the instances."""
Log("Stopping and starting instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = self.StopInstanceOp(instance)
op2 = self.StartInstanceOp(instance)
self.ExecOrQueue(instance, [op1, op2])
@_DoBatch(False)
def BurnRemove(self):
"""Remove the instances."""
Log("Removing instances")
for instance in self.to_rem:
Log("instance %s", instance, indent=1)
op = opcodes.OpInstanceRemove(instance_name=instance,
ignore_failures=True)
self.ExecOrQueue(instance, [op])
def BurnRename(self):
"""Rename the instances.
Note that this function will not execute in parallel, since we
only have one target for rename.
"""
Log("Renaming instances")
rename = self.opts.rename
for instance in self.instances:
Log("instance %s", instance, indent=1)
op_stop1 = self.StopInstanceOp(instance)
op_stop2 = self.StopInstanceOp(rename)
op_rename1 = self.RenameInstanceOp(instance, rename)
op_rename2 = self.RenameInstanceOp(rename, instance)
op_start1 = self.StartInstanceOp(rename)
op_start2 = self.StartInstanceOp(instance)
self.ExecOp(False, op_stop1, op_rename1, op_start1)
self._CheckInstanceAlive(rename)
self.ExecOp(False, op_stop2, op_rename2, op_start2)
self._CheckInstanceAlive(instance)
@_DoCheckInstances
@_DoBatch(True)
def BurnReinstall(self):
"""Reinstall the instances."""
Log("Reinstalling instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = self.StopInstanceOp(instance)
op2 = opcodes.OpInstanceReinstall(instance_name=instance)
Log("reinstall without passing the OS", indent=2)
op3 = opcodes.OpInstanceReinstall(instance_name=instance,
os_type=self.opts.os)
Log("reinstall specifying the OS", indent=2)
op4 = self.StartInstanceOp(instance)
self.ExecOrQueue(instance, [op1, op2, op3, op4])
@_DoCheckInstances
@_DoBatch(True)
def BurnReboot(self):
"""Reboot the instances."""
Log("Rebooting instances")
for instance in self.instances:
Log("instance %s", instance, indent=1)
ops = []
for reboot_type in self.opts.reboot_types:
op = opcodes.OpInstanceReboot(instance_name=instance,
reboot_type=reboot_type,
ignore_secondaries=False)
Log("reboot with type '%s'", reboot_type, indent=2)
ops.append(op)
self.ExecOrQueue(instance, ops)
@_DoCheckInstances
@_DoBatch(True)
def BurnRenameSame(self):
"""Rename the instances to their own name."""
Log("Renaming the instances to their own name")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op1 = self.StopInstanceOp(instance)
op2 = self.RenameInstanceOp(instance, instance)
Log("rename to the same name", indent=2)
op4 = self.StartInstanceOp(instance)
self.ExecOrQueue(instance, [op1, op2, op4])
@_DoCheckInstances
@_DoBatch(True)
def BurnActivateDisks(self):
"""Activate and deactivate disks of the instances."""
Log("Activating/deactivating disks")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op_start = self.StartInstanceOp(instance)
op_act = opcodes.OpInstanceActivateDisks(instance_name=instance)
op_deact = opcodes.OpInstanceDeactivateDisks(instance_name=instance)
op_stop = self.StopInstanceOp(instance)
Log("activate disks when online", indent=2)
Log("activate disks when offline", indent=2)
Log("deactivate disks (when offline)", indent=2)
self.ExecOrQueue(instance, [op_act, op_stop, op_act, op_deact, op_start])
@_DoCheckInstances
@_DoBatch(False)
def BurnAddRemoveDisks(self):
"""Add and remove an extra disk for the instances."""
Log("Adding and removing disks")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op_add = opcodes.OpInstanceSetParams(
instance_name=instance,
disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
op_rem = opcodes.OpInstanceSetParams(
instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
op_stop = self.StopInstanceOp(instance)
op_start = self.StartInstanceOp(instance)
Log("adding a disk", indent=2)
Log("removing last disk", indent=2)
self.ExecOrQueue(instance, [op_add, op_stop, op_rem, op_start])
@_DoBatch(False)
def BurnAddRemoveNICs(self):
"""Add, change and remove an extra NIC for the instances."""
Log("Adding and removing NICs")
for instance in self.instances:
Log("instance %s", instance, indent=1)
op_add = opcodes.OpInstanceSetParams(
instance_name=instance, nics=[(constants.DDM_ADD, {})])
op_chg = opcodes.OpInstanceSetParams(
instance_name=instance, nics=[(constants.DDM_MODIFY,
-1, {"mac": constants.VALUE_GENERATE})])
op_rem = opcodes.OpInstanceSetParams(
instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
Log("adding a NIC", indent=2)
Log("changing a NIC", indent=2)
Log("removing last NIC", indent=2)
self.ExecOrQueue(instance, [op_add, op_chg, op_rem])
def ConfdCallback(self, reply):
"""Callback for confd queries"""
if reply.type == confd_client.UPCALL_REPLY:
if reply.server_reply.status != constants.CONFD_REPL_STATUS_OK:
Err("Query %s gave non-ok status %s: %s" % (reply.orig_request,
reply.server_reply.status,
reply.server_reply))
if reply.orig_request.type == constants.CONFD_REQ_PING:
Log("Ping: OK", indent=1)
elif reply.orig_request.type == constants.CONFD_REQ_CLUSTER_MASTER:
if reply.server_reply.answer == self.cluster_info["master"]:
Log("Master: OK", indent=1)
else:
Err("Master: wrong: %s" % reply.server_reply.answer)
elif reply.orig_request.type == constants.CONFD_REQ_NODE_ROLE_BYNAME:
if reply.server_reply.answer == constants.CONFD_NODE_ROLE_MASTER:
Log("Node role for master: OK", indent=1)
else:
Err("Node role for master: wrong: %s" % reply.server_reply.answer)
def DoConfdRequestReply(self, req):
self.confd_counting_callback.RegisterQuery(req.rsalt)
self.confd_client.SendRequest(req, async=False)
while not self.confd_counting_callback.AllAnswered():
if not self.confd_client.ReceiveReply():
Err("Did not receive all expected confd replies")
break
def BurnConfd(self):
"""Run confd queries for our instances.
The following confd queries are tested:
- CONFD_REQ_PING: simple ping
- CONFD_REQ_CLUSTER_MASTER: cluster master
- CONFD_REQ_NODE_ROLE_BYNAME: node role, for the master
"""
Log("Checking confd results")
filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback)
counting_callback = confd_client.ConfdCountingCallback(filter_callback)
self.confd_counting_callback = counting_callback
self.confd_client = confd_client.GetConfdClient(counting_callback)
req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.DoConfdRequestReply(req)
req = confd_client.ConfdClientRequest(
type=constants.CONFD_REQ_CLUSTER_MASTER)
self.DoConfdRequestReply(req)
req = confd_client.ConfdClientRequest(
type=constants.CONFD_REQ_NODE_ROLE_BYNAME,
query=self.cluster_info["master"])
self.DoConfdRequestReply(req)
def _CheckInstanceAlive(self, instance):
"""Check if an instance is alive by doing http checks.
This will try to retrieve the url on the instance /hostname.txt
and check that it contains the hostname of the instance. In case
we get ECONNREFUSED, we retry up to the net timeout seconds, for
any other error we abort.
"""
if not self.opts.http_check:
return
end_time = time.time() + self.opts.net_timeout
url = None
while time.time() < end_time and url is None:
try:
url = self.url_opener.open("http://%s/hostname.txt" % instance)
except IOError:
# here we can have connection refused, no route to host, etc.
time.sleep(1)
if url is None:
raise InstanceDown(instance, "Cannot contact instance")
hostname = url.read().strip()
url.close()
if hostname != instance:
raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" %
(instance, hostname)))
def BurninCluster(self):
"""Test a cluster intensively.
This will create instances and then start/stop/failover them.
It is safe for existing instances but could impact performance.
"""
Log("Testing global parameters")
if (len(self.nodes) == 1 and
self.opts.disk_template not in _SINGLE_NODE_DISK_TEMPLATES):
Err("When one node is available/selected the disk template must"
" be one of %s" % utils.CommaJoin(_SINGLE_NODE_DISK_TEMPLATES))
if self.opts.do_confd_tests and not constants.ENABLE_CONFD:
Err("You selected confd tests but confd was disabled at configure time")
has_err = True
try:
self.BurnCreateInstances()
if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]:
self.BurnModifyRuntimeMemory()
if self.opts.do_replace1 and \
self.opts.disk_template in constants.DTS_INT_MIRROR:
self.BurnReplaceDisks1D8()
if (self.opts.do_replace2 and len(self.nodes) > 2 and
self.opts.disk_template in constants.DTS_INT_MIRROR):
self.BurnReplaceDisks2()
if (self.opts.disk_template in constants.DTS_GROWABLE and
compat.any(n > 0 for n in self.disk_growth)):
self.BurnGrowDisks()
if self.opts.do_failover and \
self.opts.disk_template in constants.DTS_MIRRORED:
self.BurnFailover()
if self.opts.do_migrate:
if self.opts.disk_template not in constants.DTS_MIRRORED:
Log("Skipping migration (disk template %s does not support it)",
self.opts.disk_template)
elif not self.hv_can_migrate:
Log("Skipping migration (hypervisor %s does not support it)",
self.hypervisor)
else:
self.BurnMigrate()
if (self.opts.do_move and len(self.nodes) > 1 and
self.opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]):
self.BurnMove()
if (self.opts.do_importexport and
self.opts.disk_template in _IMPEXP_DISK_TEMPLATES):
self.BurnImportExport()
if self.opts.do_reinstall:
self.BurnReinstall()
if self.opts.do_reboot:
self.BurnReboot()
if self.opts.do_renamesame:
self.BurnRenameSame()
if self.opts.do_addremove_disks:
self.BurnAddRemoveDisks()
default_nic_mode = self.cluster_default_nicparams[constants.NIC_MODE]
# Don't add/remove nics in routed mode, as we would need an ip to add
# them with
if self.opts.do_addremove_nics:
if default_nic_mode == constants.NIC_MODE_BRIDGED:
self.BurnAddRemoveNICs()
else:
Log("Skipping nic add/remove as the cluster is not in bridged mode")
if self.opts.do_activate_disks:
self.BurnActivateDisks()
if self.opts.rename:
self.BurnRename()
if self.opts.do_confd_tests:
self.BurnConfd()
if self.opts.do_startstop:
self.BurnStopStart()
has_err = False
finally:
if has_err:
Log("Error detected: opcode buffer follows:\n\n")
Log(self.GetFeedbackBuf())
Log("\n\n")
if not self.opts.keep_instances:
try:
self.BurnRemove()
except Exception, err: # pylint: disable=W0703
if has_err: # already detected errors, so errors in removal
# are quite expected
Log("Note: error detected during instance remove: %s", err)
else: # non-expected error
raise
return constants.EXIT_SUCCESS
def Main():
"""Main function.
"""
utils.SetupLogging(pathutils.LOG_BURNIN, sys.argv[0],
debug=False, stderr_logging=True)
return Burner().BurninCluster()
| vladimir-ipatov/ganeti | lib/tools/burnin.py | Python | gpl-2.0 | 42,651 |
# This file is a part of pysnapshotd, a program for automated backups
# Copyright (C) 2015-2016 Jonas Thiem
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import threading
class BufferedPipeObject(object):
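    """In-memory, thread-safe pipe with a blocking read end.
    write() either appends to an internal buffer or, if a write function
    has been installed via _set_write_func(), forwards the data directly
    to it; read() blocks until the requested amount of data is available
    or the pipe is closed.
    """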
def __init__(self):
self.closed = False
self.contents = b""
self.access_mutex = threading.Lock()
self.waiting_for_content_semaphore = \
threading.Semaphore()
self.waiting_for_content_counter = 0
self._write_func = None
def _set_write_func(self, f):
self.access_mutex.acquire()
self._write_func = f
self.access_mutex.release()
def close(self):
self.access_mutex.acquire()
self.closed = True
self.access_mutex.release()
def write(self, data):
# First, check if pipe is still open at all:
self.access_mutex.acquire()
if self.closed:
self.access_mutex.release()
raise OSError("broken pipe - pipe has been closed")
# Do nothing for an obvious dummy command:
if len(data) == 0:
self.access_mutex.release()
return 0
# Try to write with the write func if given:
        # (which means this pipe object itself will always remain empty and
        # .read() on it will block forever, since writes are passed directly
        # through to the target write function)
        if self._write_func is not None:
try:
self._write_func(data)
except Exception:
self.closed = True
finally:
self.access_mutex.release()
return
# Otherwise, just put contents in internal buffer for reading from
# this pipe from "the other end":
try:
self.contents += data
i = 0
while i < self.waiting_for_content_counter:
self.waiting_for_content_semaphore.\
release()
i += 1
finally:
self.access_mutex.release()
def read(self, amount):
print(" >> PIPE READ: " + str(amount))
if amount <= 0:
print(" >> PIPE READ DATA: <empty read>")
return b""
self.access_mutex.acquire()
# Try to read data as long as needed to acquire requested amount:
obtained_data = b""
while True:
# If pipe was closed along this process, abort:
if self.closed:
self.access_mutex.release()
raise OSError("broken pipe - pipe has been closed")
# Try to obtain as much data as requested:
if len(self.contents) > 0:
added_data = self.contents[:amount]
obtained_data += added_data
self.contents = self.contents[len(added_data):]
amount -= len(added_data)
# If there is not enough data available, we will need to wait for
# more:
if amount > 0:
self.waiting_for_content_counter += 1
self.access_mutex.release()
self.waiting_for_content_semaphore.acquire()
self.access_mutex.acquire()
else:
assert(len(obtained_data) > 0)
print(" >> PIPE READ DATA: " + str(obtained_data))
return obtained_data
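# Rough usage sketch (comment only; the names below are illustrative):
# one thread feeds the pipe while another blocks on read() until enough
# bytes have arrived:
#   pipe = BufferedPipeObject()
#   threading.Thread(target=lambda: pipe.write(b"hello")).start()
#   data = pipe.read(5)   # blocks until 5 bytes are available -> b"hello"
# If a write function has been installed via _set_write_func(), write()
# bypasses the internal buffer and read() on this object will block forever.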
| JonasT/pyrsnapshotd | src/pysnapshotd/pipeobject.py | Python | gpl-2.0 | 4,032 |
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
"""Server-side pack repository related request implmentations."""
from bzrlib.smart.request import (
FailedSmartServerResponse,
SuccessfulSmartServerResponse,
)
from bzrlib.smart.repository import (
SmartServerRepositoryRequest,
)
class SmartServerPackRepositoryAutopack(SmartServerRepositoryRequest):
def do_repository_request(self, repository):
pack_collection = getattr(repository, '_pack_collection', None)
if pack_collection is None:
            # This is not a pack repo, so asking for an autopack is just a
# no-op.
return SuccessfulSmartServerResponse(('ok',))
repository.lock_write()
try:
repository._pack_collection.autopack()
finally:
repository.unlock()
return SuccessfulSmartServerResponse(('ok',))
| stewartsmith/bzr | bzrlib/smart/packrepository.py | Python | gpl-2.0 | 1,623 |
import os, platform
sysstr = platform.system()
if sysstr == "Windows":
LF = '\r\n'
elif sysstr == "Linux":
LF = '\n'
def StripStr(str):
    # @Function: Remove spaces (' ') and tabs ('\t') at the beginning and end of the string
oldStr = ''
newStr = str
while oldStr != newStr:
oldStr = newStr
newStr = oldStr.strip('\t')
newStr = newStr.strip(' ')
return newStr
def SplitStr(str, spliters=None):
    # @Function: Split string by the given spliters, space (' ') and tab ('\t') by default
# spliters = [' ', '\t']
# spliters = []
# if spliter is not None:
# spliters.append(spliter)
if spliters is None:
spliters = [' ', '\t']
destStrs = []
srcStrs = [str]
while True:
oldDestStrs = srcStrs[:]
for s in spliters:
for srcS in srcStrs:
tempStrs = srcS.split(s)
for tempS in tempStrs:
tempS = StripStr(tempS)
if tempS != '':
destStrs.append(tempS)
srcStrs = destStrs[:]
destStrs = []
if oldDestStrs == srcStrs:
destStrs = srcStrs[:]
break
return destStrs
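# Illustrative examples (comment only, not part of the original module):
#   SplitStr("a\tb  c")                 -> ['a', 'b', 'c']
#   SplitStr("1,2, 3", spliters=[','])  -> ['1', '2', '3']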
def isPathExists(path):
if os.path.isdir(path):
if os.path.exists(path):
return True
else:
return False
else:
return False
def WriteLog(logfile, contentlist, MODE='replace'):
if os.path.exists(logfile):
if MODE == 'replace':
os.remove(logfile)
logStatus = open(logfile, 'w')
else:
logStatus = open(logfile, 'a')
else:
logStatus = open(logfile, 'w')
if isinstance(contentlist, list) or isinstance(contentlist,tuple):
for content in contentlist:
logStatus.write("%s%s" % (content, LF))
else:
logStatus.write(contentlist)
logStatus.flush()
logStatus.close() | seims/SEIMS | scenario_analysis/util.py | Python | gpl-2.0 | 1,939 |
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
from collections import namedtuple
import os
import errno
import math
try:
import urlparse as url_parser
except ImportError:
import urllib.parse as url_parser
import parser
class M3U8(object):
'''
Represents a single M3U8 playlist. Should be instantiated with
the content as string.
Parameters:
`content`
the m3u8 content as string
`base_path`
all urls (key and segments url) will be updated with this base_path,
ex.:
base_path = "http://videoserver.com/hls"
/foo/bar/key.bin --> http://videoserver.com/hls/key.bin
http://vid.com/segment1.ts --> http://videoserver.com/hls/segment1.ts
        can be passed as a parameter or set as an attribute of the ``M3U8`` object.
`base_uri`
uri the playlist comes from. it is propagated to SegmentList and Key
ex.: http://example.com/path/to
Attributes:
`key`
it's a `Key` object, the EXT-X-KEY from m3u8. Or None
`segments`
a `SegmentList` object, represents the list of `Segment`s from this playlist
`is_variant`
Returns true if this M3U8 is a variant playlist, with links to
other M3U8s with different bitrates.
If true, `playlists` is a list of the playlists available,
and `iframe_playlists` is a list of the i-frame playlists available.
`is_endlist`
Returns true if EXT-X-ENDLIST tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.8
`playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
Playlist objects
`iframe_playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
IFramePlaylist objects
`playlist_type`
A lower-case string representing the type of the playlist, which can be
one of VOD (video on demand) or EVENT.
`media`
If this is a variant playlist (`is_variant` is True), returns a list of
Media objects
`target_duration`
Returns the EXT-X-TARGETDURATION as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.2
`media_sequence`
Returns the EXT-X-MEDIA-SEQUENCE as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.3
`program_date_time`
Returns the EXT-X-PROGRAM-DATE-TIME as a string
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5
`version`
Return the EXT-X-VERSION as is
`allow_cache`
Return the EXT-X-ALLOW-CACHE as is
`files`
Returns an iterable with all files from playlist, in order. This includes
segments and key uri, if present.
`base_uri`
It is a property (getter and setter) used by
SegmentList and Key to have absolute URIs.
`is_i_frames_only`
Returns true if EXT-X-I-FRAMES-ONLY tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.12
`is_independent_segments`
Returns true if EXT-X-INDEPENDENT-SEGMENTS tag present in M3U8.
https://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.16
'''
simple_attributes = (
# obj attribute # parser attribute
('is_variant', 'is_variant'),
('is_endlist', 'is_endlist'),
('is_i_frames_only', 'is_i_frames_only'),
('target_duration', 'targetduration'),
('media_sequence', 'media_sequence'),
('program_date_time', 'program_date_time'),
('is_independent_segments', 'is_independent_segments'),
('version', 'version'),
('allow_cache', 'allow_cache'),
('playlist_type', 'playlist_type')
)
def __init__(self, content=None, base_path=None, base_uri=None, strict=False):
if content is not None:
self.data = parser.parse(content, strict)
else:
self.data = {}
self._base_uri = base_uri
if self._base_uri:
if not self._base_uri.endswith('/'):
self._base_uri += '/'
self._initialize_attributes()
self.base_path = base_path
def _initialize_attributes(self):
self.key = Key(base_uri=self.base_uri, **self.data['key']) if 'key' in self.data else None
self.segments = SegmentList([ Segment(base_uri=self.base_uri, **params)
for params in self.data.get('segments', []) ])
for attr, param in self.simple_attributes:
setattr(self, attr, self.data.get(param))
self.files = []
if self.key:
self.files.append(self.key.uri)
self.files.extend(self.segments.uri)
self.media = MediaList([ Media(base_uri=self.base_uri,
**media)
for media in self.data.get('media', []) ])
self.playlists = PlaylistList([ Playlist(base_uri=self.base_uri,
media=self.media,
**playlist)
for playlist in self.data.get('playlists', []) ])
self.iframe_playlists = PlaylistList()
for ifr_pl in self.data.get('iframe_playlists', []):
self.iframe_playlists.append(
IFramePlaylist(base_uri=self.base_uri,
uri=ifr_pl['uri'],
iframe_stream_info=ifr_pl['iframe_stream_info'])
)
def __unicode__(self):
return self.dumps()
@property
def base_uri(self):
return self._base_uri
@base_uri.setter
def base_uri(self, new_base_uri):
self._base_uri = new_base_uri
self.media.base_uri = new_base_uri
self.playlists.base_uri = new_base_uri
self.segments.base_uri = new_base_uri
@property
def base_path(self):
return self._base_path
@base_path.setter
def base_path(self, newbase_path):
self._base_path = newbase_path
self._update_base_path()
def _update_base_path(self):
if self._base_path is None:
return
if self.key:
self.key.base_path = self.base_path
self.media.base_path = self.base_path
self.segments.base_path = self.base_path
self.playlists.base_path = self.base_path
def add_playlist(self, playlist):
self.is_variant = True
self.playlists.append(playlist)
def add_iframe_playlist(self, iframe_playlist):
if iframe_playlist is not None:
self.is_variant = True
self.iframe_playlists.append(iframe_playlist)
def add_media(self, media):
self.media.append(media)
def add_segment(self, segment):
self.segments.append(segment)
def dumps(self):
'''
Returns the current m3u8 as a string.
You could also use unicode(<this obj>) or str(<this obj>)
'''
output = ['#EXTM3U']
if self.is_independent_segments:
output.append('#EXT-X-INDEPENDENT-SEGMENTS')
if self.media_sequence:
output.append('#EXT-X-MEDIA-SEQUENCE:' + str(self.media_sequence))
if self.allow_cache:
output.append('#EXT-X-ALLOW-CACHE:' + self.allow_cache.upper())
if self.version:
output.append('#EXT-X-VERSION:' + self.version)
if self.key:
output.append(str(self.key))
if self.target_duration:
output.append('#EXT-X-TARGETDURATION:' + int_or_float_to_string(self.target_duration))
if self.program_date_time is not None:
output.append('#EXT-X-PROGRAM-DATE-TIME:' + parser.format_date_time(self.program_date_time))
if not (self.playlist_type is None or self.playlist_type == ''):
output.append(
'#EXT-X-PLAYLIST-TYPE:%s' % str(self.playlist_type).upper())
if self.is_i_frames_only:
output.append('#EXT-X-I-FRAMES-ONLY')
if self.is_variant:
if self.media:
output.append(str(self.media))
output.append(str(self.playlists))
if self.iframe_playlists:
output.append(str(self.iframe_playlists))
output.append(str(self.segments))
if self.is_endlist:
output.append('#EXT-X-ENDLIST')
return '\n'.join(output)
def dump(self, filename):
'''
Saves the current m3u8 to ``filename``
'''
self._create_sub_directories(filename)
with open(filename, 'w') as fileobj:
fileobj.write(self.dumps())
def _create_sub_directories(self, filename):
basename = os.path.dirname(filename)
try:
os.makedirs(basename)
except OSError as error:
if error.errno != errno.EEXIST:
raise
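# Illustrative usage sketch (added comment, not part of the library's own docs):
#   m3u8_obj = M3U8(manifest_text, base_uri="http://example.com/hls/")
#   for segment in m3u8_obj.segments:
#       print(segment.absolute_uri)
#   print(m3u8_obj.dumps())            # re-serialize the playlist
# `manifest_text` is assumed to hold a raw "#EXTM3U ..." playlist string.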
class BasePathMixin(object):
@property
def absolute_uri(self):
if self.uri is None:
return None
if parser.is_url(self.uri):
return self.uri
else:
if self.base_uri is None:
raise ValueError('There can not be `absolute_uri` with no `base_uri` set')
return _urijoin(self.base_uri, self.uri)
@property
def base_path(self):
return os.path.dirname(self.uri)
@base_path.setter
def base_path(self, newbase_path):
if not self.base_path:
self.uri = "%s/%s" % (newbase_path, self.uri)
self.uri = self.uri.replace(self.base_path, newbase_path)
class GroupedBasePathMixin(object):
def _set_base_uri(self, new_base_uri):
for item in self:
item.base_uri = new_base_uri
base_uri = property(None, _set_base_uri)
def _set_base_path(self, newbase_path):
for item in self:
item.base_path = newbase_path
base_path = property(None, _set_base_path)
class Segment(BasePathMixin):
'''
A video segment from a M3U8 playlist
`uri`
a string with the segment uri
`title`
title attribute from EXTINF parameter
`program_date_time`
Returns the EXT-X-PROGRAM-DATE-TIME as a datetime
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5
`discontinuity`
Returns a boolean indicating if a EXT-X-DISCONTINUITY tag exists
http://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.11
`cue_out`
Returns a boolean indicating if a EXT-X-CUE-OUT-CONT tag exists
`duration`
duration attribute from EXTINF parameter
`base_uri`
uri the key comes from in URI hierarchy. ex.: http://example.com/path/to
`byterange`
byterange attribute from EXT-X-BYTERANGE parameter
`key`
Key used to encrypt the segment (EXT-X-KEY)
'''
def __init__(self, uri, base_uri, program_date_time=None, duration=None,
title=None, byterange=None, cue_out=False, discontinuity=False, key=None):
self.uri = uri
self.duration = duration
self.title = title
self.base_uri = base_uri
self.byterange = byterange
self.program_date_time = program_date_time
self.discontinuity = discontinuity
self.cue_out = cue_out
self.key = Key(base_uri=base_uri,**key) if key else None
def dumps(self, last_segment):
output = []
if last_segment and self.key != last_segment.key:
output.append(str(self.key))
output.append('\n')
if self.discontinuity:
output.append('#EXT-X-DISCONTINUITY\n')
if self.program_date_time:
output.append('#EXT-X-PROGRAM-DATE-TIME:%s\n' % parser.format_date_time(self.program_date_time))
if self.cue_out:
output.append('#EXT-X-CUE-OUT-CONT\n')
output.append('#EXTINF:%s,' % int_or_float_to_string(self.duration))
if self.title:
output.append(quoted(self.title))
output.append('\n')
if self.byterange:
output.append('#EXT-X-BYTERANGE:%s\n' % self.byterange)
output.append(self.uri)
return ''.join(output)
def __str__(self):
return self.dumps(None)
class SegmentList(list, GroupedBasePathMixin):
def __str__(self):
output = []
last_segment = None
for segment in self:
output.append(segment.dumps(last_segment))
last_segment = segment
return '\n'.join(output)
@property
def uri(self):
return [seg.uri for seg in self]
class Key(BasePathMixin):
'''
Key used to encrypt the segments in a m3u8 playlist (EXT-X-KEY)
`method`
is a string. ex.: "AES-128"
`uri`
is a string. ex:: "https://priv.example.com/key.php?r=52"
`base_uri`
uri the key comes from in URI hierarchy. ex.: http://example.com/path/to
`iv`
initialization vector. a string representing a hexadecimal number. ex.: 0X12A
'''
def __init__(self, method, uri, base_uri, iv=None, keyformat=None, keyformatversions=None):
self.method = method
self.uri = uri
self.iv = iv
self.keyformat = keyformat
self.keyformatversions = keyformatversions
self.base_uri = base_uri
def __str__(self):
output = [
'METHOD=%s' % self.method,
]
if self.uri:
output.append('URI="%s"' % self.uri)
if self.iv:
output.append('IV=%s' % self.iv)
if self.keyformat:
output.append('KEYFORMAT="%s"' % self.keyformat)
if self.keyformatversions:
output.append('KEYFORMATVERSIONS="%s"' % self.keyformatversions)
return '#EXT-X-KEY:' + ','.join(output)
def __eq__(self, other):
return self.method == other.method and \
self.uri == other.uri and \
self.iv == other.iv and \
self.base_uri == other.base_uri and \
self.keyformat == other.keyformat and \
self.keyformatversions == other.keyformatversions
def __ne__(self, other):
return not self.__eq__(other)
class Playlist(BasePathMixin):
'''
Playlist object representing a link to a variant M3U8 with a specific bitrate.
Attributes:
`stream_info` is a named tuple containing the attributes: `program_id`,
        `bandwidth`, `average_bandwidth`, `codecs` and `resolution`, which
        is a tuple (w, h) of integers
`media` is a list of related Media entries.
More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.10
'''
def __init__(self, uri, stream_info, media, base_uri):
self.uri = uri
self.base_uri = base_uri
resolution = stream_info.get('resolution')
        if resolution is not None:
resolution = resolution.strip('"')
values = resolution.split('x')
resolution_pair = (int(values[0]), int(values[1]))
else:
resolution_pair = None
self.stream_info = StreamInfo(
bandwidth=stream_info['bandwidth'],
average_bandwidth=stream_info.get('average_bandwidth'),
program_id=stream_info.get('program_id'),
resolution=resolution_pair,
codecs=stream_info.get('codecs')
)
self.media = []
for media_type in ('audio', 'video', 'subtitles'):
group_id = stream_info.get(media_type)
if not group_id:
continue
self.media += filter(lambda m: m.group_id == group_id, media)
def __str__(self):
stream_inf = []
if self.stream_info.program_id:
stream_inf.append('PROGRAM-ID=%d' % self.stream_info.program_id)
if self.stream_info.bandwidth:
stream_inf.append('BANDWIDTH=%d' % self.stream_info.bandwidth)
if self.stream_info.average_bandwidth:
stream_inf.append('AVERAGE-BANDWIDTH=%d' %
self.stream_info.average_bandwidth)
if self.stream_info.resolution:
res = str(self.stream_info.resolution[0]) + 'x' + str(self.stream_info.resolution[1])
stream_inf.append('RESOLUTION=' + res)
if self.stream_info.codecs:
stream_inf.append('CODECS=' + quoted(self.stream_info.codecs))
for media in self.media:
media_type = media.type.upper()
stream_inf.append('%s="%s"' % (media_type, media.group_id))
return '#EXT-X-STREAM-INF:' + ','.join(stream_inf) + '\n' + self.uri
class IFramePlaylist(BasePathMixin):
'''
IFramePlaylist object representing a link to a
variant M3U8 i-frame playlist with a specific bitrate.
Attributes:
`iframe_stream_info` is a named tuple containing the attributes:
`program_id`, `bandwidth`, `codecs` and `resolution` which
is a tuple (w, h) of integers
More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.13
'''
def __init__(self, base_uri, uri, iframe_stream_info):
self.uri = uri
self.base_uri = base_uri
resolution = iframe_stream_info.get('resolution')
if resolution is not None:
values = resolution.split('x')
resolution_pair = (int(values[0]), int(values[1]))
else:
resolution_pair = None
self.iframe_stream_info = StreamInfo(
bandwidth=iframe_stream_info.get('bandwidth'),
average_bandwidth=None,
program_id=iframe_stream_info.get('program_id'),
resolution=resolution_pair,
codecs=iframe_stream_info.get('codecs')
)
def __str__(self):
iframe_stream_inf = []
if self.iframe_stream_info.program_id:
iframe_stream_inf.append('PROGRAM-ID=%d' %
self.iframe_stream_info.program_id)
if self.iframe_stream_info.bandwidth:
iframe_stream_inf.append('BANDWIDTH=%d' %
self.iframe_stream_info.bandwidth)
if self.iframe_stream_info.resolution:
res = (str(self.iframe_stream_info.resolution[0]) + 'x' +
str(self.iframe_stream_info.resolution[1]))
iframe_stream_inf.append('RESOLUTION=' + res)
if self.iframe_stream_info.codecs:
iframe_stream_inf.append('CODECS=' +
quoted(self.iframe_stream_info.codecs))
if self.uri:
iframe_stream_inf.append('URI=' + quoted(self.uri))
return '#EXT-X-I-FRAME-STREAM-INF:' + ','.join(iframe_stream_inf)
StreamInfo = namedtuple(
'StreamInfo',
['bandwidth', 'average_bandwidth', 'program_id', 'resolution', 'codecs']
)
class Media(BasePathMixin):
'''
A media object from a M3U8 playlist
https://tools.ietf.org/html/draft-pantos-http-live-streaming-16#section-4.3.4.1
`uri`
a string with the media uri
`type`
`group_id`
`language`
`assoc-language`
`name`
`default`
`autoselect`
`forced`
`instream_id`
`characteristics`
attributes in the EXT-MEDIA tag
`base_uri`
uri the media comes from in URI hierarchy. ex.: http://example.com/path/to
'''
def __init__(self, uri=None, type=None, group_id=None, language=None,
name=None, default=None, autoselect=None, forced=None,
characteristics=None, assoc_language=None,
instream_id=None,base_uri=None, **extras):
self.base_uri = base_uri
self.uri = uri
self.type = type
self.group_id = group_id
self.language = language
self.name = name
self.default = default
self.autoselect = autoselect
self.forced = forced
self.assoc_language = assoc_language
self.instream_id = instream_id
self.characteristics = characteristics
self.extras = extras
def dumps(self):
media_out = []
if self.uri:
media_out.append('URI=' + quoted(self.uri))
if self.type:
media_out.append('TYPE=' + self.type)
if self.group_id:
media_out.append('GROUP-ID=' + quoted(self.group_id))
if self.language:
media_out.append('LANGUAGE=' + quoted(self.language))
if self.assoc_language:
media_out.append('ASSOC-LANGUAGE=' + quoted(self.assoc_language))
if self.name:
media_out.append('NAME=' + quoted(self.name))
if self.default:
media_out.append('DEFAULT=' + self.default)
if self.autoselect:
media_out.append('AUTOSELECT=' + self.autoselect)
if self.forced:
media_out.append('FORCED=' + self.forced)
if self.instream_id:
media_out.append('INSTREAM-ID=' + self.instream_id)
if self.characteristics:
media_out.append('CHARACTERISTICS=' + quoted(self.characteristics))
return ('#EXT-X-MEDIA:' + ','.join(media_out))
def __str__(self):
return self.dumps()
class MediaList(list, GroupedBasePathMixin):
def __str__(self):
output = [str(playlist) for playlist in self]
return '\n'.join(output)
@property
def uri(self):
return [media.uri for media in self]
class PlaylistList(list, GroupedBasePathMixin):
def __str__(self):
output = [str(playlist) for playlist in self]
return '\n'.join(output)
def denormalize_attribute(attribute):
return attribute.replace('_','-').upper()
def quoted(string):
return '"%s"' % string
def _urijoin(base_uri, path):
if parser.is_url(base_uri):
return url_parser.urljoin(base_uri, path)
else:
return os.path.normpath(os.path.join(base_uri, path.strip('/')))
def int_or_float_to_string(number):
return str(int(number)) if number == math.floor(number) else str(number)
| JaxxC/goodgame.xbmc | plugin.video.goodgame/resources/lib/m3u8/model.py | Python | gpl-2.0 | 22,352 |
# coding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import socket
import os
import re
import select
import time
import paramiko
import struct
import fcntl
import signal
import textwrap
import getpass
import fnmatch
import readline
import datetime
from multiprocessing import Pool
os.environ['DJANGO_SETTINGS_MODULE'] = 'jumpserver.settings'
from juser.models import User
from jlog.models import Log
from jumpserver.api import CONF, BASE_DIR, ServerError, user_perm_group_api, user_perm_group_hosts_api, get_user_host
from jumpserver.api import AssetAlias, get_connect_item
try:
import termios
import tty
except ImportError:
    print '\033[1;31mOnly Unix-like systems are supported.\033[0m'
time.sleep(3)
sys.exit()
CONF.read(os.path.join(BASE_DIR, 'jumpserver.conf'))
LOG_DIR = os.path.join(BASE_DIR, 'logs')
SSH_KEY_DIR = os.path.join(BASE_DIR, 'keys')
SERVER_KEY_DIR = os.path.join(SSH_KEY_DIR, 'server')
LOGIN_NAME = getpass.getuser()
def color_print(msg, color='blue'):
"""Print colorful string."""
color_msg = {'blue': '\033[1;36m%s\033[0m',
'green': '\033[1;32m%s\033[0m',
'red': '\033[1;31m%s\033[0m'}
    print color_msg.get(color, color_msg['blue']) % msg
def color_print_exit(msg, color='red'):
"""Print colorful string and exit."""
color_print(msg, color=color)
time.sleep(2)
sys.exit()
def get_win_size():
"""This function use to get the size of the windows!"""
if 'TIOCGWINSZ' in dir(termios):
TIOCGWINSZ = termios.TIOCGWINSZ
else:
TIOCGWINSZ = 1074295912L # Assume
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(sys.stdout.fileno(), TIOCGWINSZ, s)
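    # TIOCGWINSZ fills a struct winsize: (rows, cols, xpixel, ypixel), so the
    # slice below yields (rows, columns) of the local terminal.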
return struct.unpack('HHHH', x)[0:2]
def set_win_size(sig, data):
"""This function use to set the window size of the terminal!"""
try:
win_size = get_win_size()
channel.resize_pty(height=win_size[0], width=win_size[1])
except:
pass
def log_record(username, host):
"""Logging user command and output."""
connect_log_dir = os.path.join(LOG_DIR, 'connect')
timestamp_start = int(time.time())
today = time.strftime('%Y%m%d', time.localtime(timestamp_start))
time_now = time.strftime('%H%M%S', time.localtime(timestamp_start))
today_connect_log_dir = os.path.join(connect_log_dir, today)
log_filename = '%s_%s_%s.log' % (username, host, time_now)
log_file_path = os.path.join(today_connect_log_dir, log_filename)
dept_name = User.objects.get(username=username).dept.name
pid = os.getpid()
pts = os.popen("ps axu | awk '$2==%s{ print $7 }'" % pid).read().strip()
ip_list = os.popen("who | awk '$2==\"%s\"{ print $5 }'" % pts).read().strip('()\n')
if not os.path.isdir(today_connect_log_dir):
try:
os.makedirs(today_connect_log_dir)
os.chmod(today_connect_log_dir, 0777)
except OSError:
raise ServerError('Create %s failed, Please modify %s permission.' % (today_connect_log_dir, connect_log_dir))
try:
log_file = open(log_file_path, 'a')
except IOError:
raise ServerError('Create logfile failed, Please modify %s permission.' % today_connect_log_dir)
log = Log(user=username, host=host, remote_ip=ip_list, dept_name=dept_name,
log_path=log_file_path, start_time=datetime.datetime.now(), pid=pid)
log_file.write('Starttime is %s\n' % datetime.datetime.now())
log.save()
return log_file, log
def posix_shell(chan, username, host):
"""
Use paramiko channel connect server interactive.
"""
log_file, log = log_record(username, host)
old_tty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
try:
r, w, e = select.select([chan, sys.stdin], [], [])
except:
pass
if chan in r:
try:
x = chan.recv(10240)
if len(x) == 0:
break
sys.stdout.write(x)
sys.stdout.flush()
log_file.write(x)
log_file.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = os.read(sys.stdin.fileno(), 1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
log_file.write('Endtime is %s' % datetime.datetime.now())
log_file.close()
log.is_finished = True
log.log_finished = False
log.end_time = datetime.datetime.now()
log.save()
print_prompt()
def get_user_hostgroup(username):
"""Get the hostgroups of under the user control."""
groups_attr = {}
group_all = user_perm_group_api(username)
for group in group_all:
groups_attr[group.name] = [group.id, group.comment]
return groups_attr
def get_user_hostgroup_host(username, gid):
"""Get the hostgroup hosts of under the user control."""
hosts_attr = {}
user = User.objects.get(username=username)
hosts = user_perm_group_hosts_api(gid)
for host in hosts:
alias = AssetAlias.objects.filter(user=user, host=host)
if alias and alias[0].alias != '':
hosts_attr[host.ip] = [host.id, host.ip, alias[0].alias]
else:
hosts_attr[host.ip] = [host.id, host.ip, host.comment]
return hosts_attr
def verify_connect(username, part_ip):
ip_matched = []
try:
hosts_attr = get_user_host(username)
hosts = hosts_attr.values()
except ServerError, e:
color_print(e, 'red')
return False
for ip_info in hosts:
if part_ip in ip_info[1:] and part_ip:
ip_matched = [ip_info[1]]
break
for info in ip_info[1:]:
if part_ip in info:
ip_matched.append(ip_info[1])
ip_matched = list(set(ip_matched))
if len(ip_matched) > 1:
for ip in ip_matched:
print '%-15s -- %s' % (ip, hosts_attr[ip][2])
elif len(ip_matched) < 1:
color_print('No Permission or No host.', 'red')
else:
username, password, host, port = get_connect_item(username, ip_matched[0])
connect(username, password, host, port, LOGIN_NAME)
def print_prompt():
msg = """\033[1;32m### Welcome Use JumpServer To Login. ### \033[0m
1) Type \033[32mIP or Part IP, Host Alias or Comments \033[0m To Login.
2) Type \033[32mP/p\033[0m To Print The Servers You Available.
3) Type \033[32mG/g\033[0m To Print The Server Groups You Available.
4) Type \033[32mG/g(1-N)\033[0m To Print The Server Group Hosts You Available.
5) Type \033[32mE/e\033[0m To Execute Command On Several Servers.
6) Type \033[32mQ/q\033[0m To Quit.
"""
print textwrap.dedent(msg)
def print_user_host(username):
try:
hosts_attr = get_user_host(username)
except ServerError, e:
color_print(e, 'red')
return
hosts = hosts_attr.keys()
hosts.sort()
for ip in hosts:
print '%-15s -- %s' % (ip, hosts_attr[ip][2])
print ''
def print_user_hostgroup(username):
group_attr = get_user_hostgroup(username)
groups = group_attr.keys()
for g in groups:
print "[%3s] %s -- %s" % (group_attr[g][0], g, group_attr[g][1])
def print_user_hostgroup_host(username, gid):
pattern = re.compile(r'\d+')
match = pattern.match(gid)
if match:
hosts_attr = get_user_hostgroup_host(username, gid)
hosts = hosts_attr.keys()
hosts.sort()
for ip in hosts:
print '%-15s -- %s' % (ip, hosts_attr[ip][2])
else:
color_print('No such group id, Please check it.', 'red')
def connect(username, password, host, port, login_name):
"""
Connect server.
"""
ps1 = "PS1='[\u@%s \W]\$ ' && TERM=xterm && export TERM\n" % host
login_msg = "clear;echo -e '\\033[32mLogin %s done. Enjoy it.\\033[0m'\n" % host
# Make a ssh connection
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(host, port=port, username=username, password=password, compress=True)
    except (paramiko.ssh_exception.AuthenticationException, paramiko.ssh_exception.SSHException):
raise ServerError('Authentication Error.')
except socket.error:
raise ServerError('Connect SSH Socket Port Error, Please Correct it.')
# Make a channel and set windows size
global channel
win_size = get_win_size()
channel = ssh.invoke_shell(height=win_size[0], width=win_size[1])
try:
signal.signal(signal.SIGWINCH, set_win_size)
except:
pass
# Set PS1 and msg it
channel.send(ps1)
channel.send(login_msg)
# Make ssh interactive tunnel
posix_shell(channel, login_name, host)
# Shutdown channel socket
channel.close()
ssh.close()
def remote_exec_cmd(ip, port, username, password, cmd):
try:
time.sleep(5)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port, username, password, timeout=5)
stdin, stdout, stderr = ssh.exec_command("bash -l -c '%s'" % cmd)
out = stdout.readlines()
err = stderr.readlines()
color_print('%s:' % ip, 'blue')
for i in out:
color_print(" " * 4 + i.strip(), 'green')
for j in err:
color_print(" " * 4 + j.strip(), 'red')
ssh.close()
except Exception as e:
color_print(ip + ':', 'blue')
color_print(str(e), 'red')
def multi_remote_exec_cmd(hosts, username, cmd):
pool = Pool(processes=5)
for host in hosts:
username, password, ip, port = get_connect_item(username, host)
pool.apply_async(remote_exec_cmd, (ip, port, username, password, cmd))
pool.close()
pool.join()
def exec_cmd_servers(username):
color_print("You can choose in the following IP(s), Use glob or ips split by comma. q/Q to PreLayer.", 'green')
print_user_host(LOGIN_NAME)
while True:
hosts = []
inputs = raw_input('\033[1;32mip(s)>: \033[0m')
if inputs in ['q', 'Q']:
break
get_hosts = get_user_host(username).keys()
if ',' in inputs:
ips_input = inputs.split(',')
for host in ips_input:
if host in get_hosts:
hosts.append(host)
else:
for host in get_hosts:
if fnmatch.fnmatch(host, inputs):
hosts.append(host.strip())
if len(hosts) == 0:
color_print("Check again, Not matched any ip!", 'red')
continue
else:
print "You matched ip: %s" % hosts
color_print("Input the Command , The command will be Execute on servers, q/Q to quit.", 'green')
while True:
cmd = raw_input('\033[1;32mCmd(s): \033[0m')
if cmd in ['q', 'Q']:
break
exec_log_dir = os.path.join(LOG_DIR, 'exec_cmds')
if not os.path.isdir(exec_log_dir):
os.mkdir(exec_log_dir)
os.chmod(exec_log_dir, 0777)
filename = "%s/%s.log" % (exec_log_dir, time.strftime('%Y%m%d'))
f = open(filename, 'a')
f.write("DateTime: %s User: %s Host: %s Cmds: %s\n" %
(time.strftime('%Y/%m/%d %H:%M:%S'), username, hosts, cmd))
multi_remote_exec_cmd(hosts, username, cmd)
if __name__ == '__main__':
print_prompt()
gid_pattern = re.compile(r'^g\d+$')
try:
while True:
try:
option = raw_input("\033[1;32mOpt or IP>:\033[0m ")
except EOFError:
print
continue
except KeyboardInterrupt:
sys.exit(0)
if option in ['P', 'p']:
print_user_host(LOGIN_NAME)
continue
elif option in ['G', 'g']:
print_user_hostgroup(LOGIN_NAME)
continue
elif gid_pattern.match(option):
gid = option[1:].strip()
print_user_hostgroup_host(LOGIN_NAME, gid)
continue
elif option in ['E', 'e']:
exec_cmd_servers(LOGIN_NAME)
elif option in ['Q', 'q', 'exit']:
sys.exit()
else:
try:
verify_connect(LOGIN_NAME, option)
except ServerError, e:
color_print(e, 'red')
except IndexError:
pass
| watchsky126/jumpserver | connect.py | Python | gpl-2.0 | 12,846 |
import hashlib
import binascii
class MerkleTools(object):
def __init__(self, hash_type="sha256"):
hash_type = hash_type.lower()
if hash_type == 'sha256':
self.hash_function = hashlib.sha256
elif hash_type == 'md5':
self.hash_function = hashlib.md5
elif hash_type == 'sha224':
self.hash_function = hashlib.sha224
elif hash_type == 'sha384':
self.hash_function = hashlib.sha384
elif hash_type == 'sha512':
self.hash_function = hashlib.sha512
elif hash_type == 'sha3_256':
self.hash_function = hashlib.sha3_256
elif hash_type == 'sha3_224':
self.hash_function = hashlib.sha3_224
elif hash_type == 'sha3_384':
self.hash_function = hashlib.sha3_384
elif hash_type == 'sha3_512':
self.hash_function = hashlib.sha3_512
else:
            raise Exception('`hash_type` {} not supported'.format(hash_type))
self.reset_tree()
def _to_hex(self, x):
try: # python3
return x.hex()
except: # python2
return binascii.hexlify(x)
def reset_tree(self):
self.leaves = list()
self.levels = None
self.is_ready = False
def add_leaf(self, values, do_hash=False):
self.is_ready = False
# check if single leaf
if isinstance(values, tuple) or isinstance(values, list):
for v in values:
if do_hash:
v = v.encode('utf-8')
v = self.hash_function(v).hexdigest()
v = bytearray.fromhex(v)
else:
v = bytearray.fromhex(v)
self.leaves.append(v)
else:
if do_hash:
v = values.encode("utf-8")
v = self.hash_function(v).hexdigest()
v = bytearray.fromhex(v)
else:
v = bytearray.fromhex(values)
self.leaves.append(v)
def get_leaf(self, index):
return self._to_hex(self.leaves[index])
def get_leaf_count(self):
return len(self.leaves)
def get_tree_ready_state(self):
return self.is_ready
def _calculate_next_level(self):
solo_leave = None
N = len(self.levels[0]) # number of leaves on the level
if N % 2 == 1: # if odd number of leaves on the level
solo_leave = self.levels[0][-1]
N -= 1
new_level = []
for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):
new_level.append(self.hash_function(l+r).digest())
if solo_leave is not None:
new_level.append(solo_leave)
self.levels = [new_level, ] + self.levels # prepend new level
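        # e.g. with three leaves [A, B, C]: hash(A+B) is computed and C is
        # carried up unchanged, giving the next level [hash(A+B), C].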
def make_tree(self):
self.is_ready = False
if self.get_leaf_count() > 0:
self.levels = [self.leaves, ]
while len(self.levels[0]) > 1:
self._calculate_next_level()
self.is_ready = True
def get_merkle_root(self):
if self.is_ready:
if self.levels is not None:
return self._to_hex(self.levels[0][0])
else:
return None
else:
return None
def get_proof(self, index):
if self.levels is None:
return None
elif not self.is_ready or index > len(self.leaves)-1 or index < 0:
return None
else:
proof = []
for x in range(len(self.levels) - 1, 0, -1):
level_len = len(self.levels[x])
if (index == level_len - 1) and (level_len % 2 == 1): # skip if this is an odd end node
index = int(index / 2.)
continue
is_right_node = index % 2
sibling_index = index - 1 if is_right_node else index + 1
sibling_pos = "left" if is_right_node else "right"
sibling_value = self._to_hex(self.levels[x][sibling_index])
proof.append({sibling_pos: sibling_value})
index = int(index / 2.)
return proof
def validate_proof(self, proof, target_hash, merkle_root):
merkle_root = bytearray.fromhex(merkle_root)
target_hash = bytearray.fromhex(target_hash)
if len(proof) == 0:
return target_hash == merkle_root
else:
proof_hash = target_hash
for p in proof:
try:
# the sibling is a left node
sibling = bytearray.fromhex(p['left'])
proof_hash = self.hash_function(sibling + proof_hash).digest()
except:
# the sibling is a right node
sibling = bytearray.fromhex(p['right'])
proof_hash = self.hash_function(proof_hash + sibling).digest()
return proof_hash == merkle_root
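# Illustrative round trip (comment only, not part of the original module):
#   mt = MerkleTools(hash_type="sha256")
#   mt.add_leaf(["some data", "more data"], do_hash=True)
#   mt.make_tree()
#   root = mt.get_merkle_root()
#   proof = mt.get_proof(0)
#   assert mt.validate_proof(proof, mt.get_leaf(0), root)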
| OliverCole/ZeroNet | src/lib/merkletools/__init__.py | Python | gpl-2.0 | 4,984 |
# encoding: utf-8
"""
Miscellaneous functions, which are useful for handling bodies.
"""
from yade.wrapper import *
import utils,math,numpy
try:
from minieigen import *
except ImportError:
from miniEigen import *
#spheresPackDimensions==================================================
def spheresPackDimensions(idSpheres=[],mask=-1):
"""The function accepts the list of spheres id's or list of bodies and calculates max and min dimensions, geometrical center.
:param list idSpheres: list of spheres
:param int mask: :yref:`Body.mask` for the checked bodies
:return: dictionary with keys ``min`` (minimal dimension, Vector3), ``max`` (maximal dimension, Vector3), ``minId`` (minimal dimension sphere Id, Vector3), ``maxId`` (maximal dimension sphere Id, Vector3), ``center`` (central point of bounding box, Vector3), ``extends`` (sizes of bounding box, Vector3), ``volume`` (volume of spheres, Real), ``mass`` (mass of spheres, Real), ``number`` (number of spheres, int),
"""
idSpheresIter=[]
if (len(idSpheres)<1):
#check mask
ifSpherMask=[]
if (mask>-1): #The case, when only the mask was given, without list of ids
for i in O.bodies:
if ((i.mask&mask)<>0):
ifSpherMask.append(i.id)
if (len(ifSpherMask)<2):
raise RuntimeWarning("Not enough bodies to analyze with given mask")
else:
idSpheresIter=ifSpherMask
else:
raise RuntimeWarning("Only a list of particles with length > 1 can be analyzed")
else:
idSpheresIter=idSpheres
minVal = Vector3.Zero
maxVal = Vector3.Zero
minId = Vector3.Zero
maxId = Vector3.Zero
counter = 0
volume = 0.0
mass = 0.0
for i in idSpheresIter:
if (type(i).__name__=='int'):
b = O.bodies[i] #We have received a list of ID's
elif (type(i).__name__=='Body'):
            b = i #We have received a list of bodies
        else:
            raise TypeError("Unknown type of data, should be a list of ints or bodies")
if (b):
spherePosition=b.state.pos #skip non-existent spheres
try:
sphereRadius=b.shape.radius #skip non-spheres
except AttributeError: continue
if (mask>-1) and ((mask&b.mask)==0): continue #skip bodies with wrong mask
sphereRadiusVec3 = Vector3(sphereRadius,sphereRadius,sphereRadius)
sphereMax = spherePosition + sphereRadiusVec3
sphereMin = spherePosition - sphereRadiusVec3
for dim in range(0,3):
if ((sphereMax[dim]>maxVal[dim]) or (counter==0)):
maxVal[dim]=sphereMax[dim]
maxId[dim] = b.id
if ((sphereMin[dim]<minVal[dim]) or (counter==0)):
minVal[dim]=sphereMin[dim]
minId[dim] = b.id
volume += 4.0/3.0*math.pi*sphereRadius*sphereRadius*sphereRadius
mass += b.state.mass
counter += 1
center = (maxVal-minVal)/2.0+minVal
extends = maxVal-minVal
dimensions = {'max':maxVal,'min':minVal,'maxId':maxId,'minId':minId,'center':center,
'extends':extends, 'volume':volume, 'mass':mass, 'number':counter}
return dimensions
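# Usage sketch (illustrative comment): with spheres already present in O.bodies,
#   dims = spheresPackDimensions(mask=1)
# gives e.g. dims['center'], dims['extends'], dims['volume'] and dims['mass']
# for the bounding box and totals of the matching spheres.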
#facetsDimensions==================================================
def facetsDimensions(idFacets=[],mask=-1):
"""The function accepts the list of facet id's or list of facets and calculates max and min dimensions, geometrical center.
:param list idFacets: list of spheres
:param int mask: :yref:`Body.mask` for the checked bodies
:return: dictionary with keys ``min`` (minimal dimension, Vector3), ``max`` (maximal dimension, Vector3), ``minId`` (minimal dimension facet Id, Vector3), ``maxId`` (maximal dimension facet Id, Vector3), ``center`` (central point of bounding box, Vector3), ``extends`` (sizes of bounding box, Vector3), ``number`` (number of facets, int),
"""
idFacetsIter=[]
if (len(idFacets)<1):
#check mask
ifFacetMask=[]
if (mask>-1): #The case, when only the mask was given, without list of ids
for i in O.bodies:
if ((i.mask&mask)<>0):
ifFacetMask.append(i.id)
if (len(ifFacetMask)<2):
raise RuntimeWarning("Not enough bodies to analyze with given mask")
else:
idFacetsIter=ifFacetMask
else:
raise RuntimeWarning("Only a list of particles with length > 1 can be analyzed")
else:
idFacetsIter=idFacets
minVal = Vector3.Zero
maxVal = Vector3.Zero
minId = Vector3.Zero
maxId = Vector3.Zero
counter = 0
for i in idFacetsIter:
if (type(i).__name__=='int'):
b = O.bodies[i] #We have received a list of ID's
elif (type(i).__name__=='Body'):
            b = i #We have received a list of bodies
        else:
            raise TypeError("Unknown type of data, should be a list of ints or bodies")
if (b):
p = b.state.pos
o = b.state.ori
s = b.shape
pt1 = p + o*s.vertices[0]
pt2 = p + o*s.vertices[1]
pt3 = p + o*s.vertices[2]
if (mask>-1) and ((mask&b.mask)==0): continue #skip bodies with wrong mask
facetMax = Vector3(max(pt1[0], pt2[0], pt3[0]), max(pt1[1], pt2[1], pt3[1]), max(pt1[2], pt2[2], pt3[2]))
facetMin = Vector3(min(pt1[0], pt2[0], pt3[0]), min(pt1[1], pt2[1], pt3[1]), min(pt1[2], pt2[2], pt3[2]))
for dim in range(0,3):
if ((facetMax[dim]>maxVal[dim]) or (counter==0)):
maxVal[dim]=facetMax[dim]
maxId[dim] = b.id
if ((facetMin[dim]<minVal[dim]) or (counter==0)):
minVal[dim]=facetMin[dim]
minId[dim] = b.id
counter += 1
center = (maxVal-minVal)/2.0+minVal
extends = maxVal-minVal
dimensions = {'max':maxVal,'min':minVal,'maxId':maxId,'minId':minId,'center':center,
'extends':extends, 'number':counter}
return dimensions
#spheresPackDimensions==================================================
def spheresModify(idSpheres=[],mask=-1,shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),copy=False):
"""The function accepts the list of spheres id's or list of bodies and modifies them: rotating, scaling, shifting.
if copy=True copies bodies and modifies them.
Also the mask can be given. If idSpheres not empty, the function affects only bodies, where the mask passes.
If idSpheres is empty, the function search for bodies, where the mask passes.
:param Vector3 shift: Vector3(X,Y,Z) parameter moves spheres.
:param float scale: factor scales given spheres.
:param Quaternion orientation: orientation of spheres
:param int mask: :yref:`Body.mask` for the checked bodies
:returns: list of bodies if copy=True, and Boolean value if copy=False
"""
idSpheresIter=[]
if (len(idSpheres)==0):
#check mask
ifSpherMask=[]
if (mask>-1): #The case, when only the mask was given, without list of ids
for i in O.bodies:
if ((i.mask&mask)<>0):
ifSpherMask.append(i.id)
if (len(ifSpherMask)==0):
raise RuntimeWarning("No bodies to modify with given mask")
else:
idSpheresIter=ifSpherMask
else:
raise RuntimeWarning("No bodies to modify")
else:
idSpheresIter=idSpheres
dims = spheresPackDimensions(idSpheresIter)
ret=[]
for i in idSpheresIter:
if (type(i).__name__=='int'):
b = O.bodies[i] #We have received a list of ID's
elif (type(i).__name__=='Body'):
            b = i #We have received a list of bodies
        else:
            raise TypeError("Unknown type of data, should be a list of ints or bodies")
try:
sphereRadius=b.shape.radius #skip non-spheres
except AttributeError: continue
if (mask>-1) and ((mask&b.mask)==0): continue #skip bodies with wrong mask
if (copy): b=sphereDuplicate(b)
b.state.pos=orientation*(b.state.pos-dims['center'])+dims['center']
b.shape.radius*=scale
b.state.pos=(b.state.pos-dims['center'])*scale + dims['center']
b.state.pos+=shift
if (copy): ret.append(b)
if (copy):
return ret
else:
return True
#sphereDuplicate========================================================
def sphereDuplicate(idSphere):
"""The functions makes a copy of sphere"""
i=idSphere
if (type(i).__name__=='int'):
b = O.bodies[i] #We have received a list of ID's
elif (type(i).__name__=='Body'):
        b = i #We have received a list of bodies
    else:
        raise TypeError("Unknown type of data, should be a list of ints or bodies")
try:
sphereRadius=b.shape.radius #skip non-spheres
except AttributeError:
return False
addedBody = utils.sphere(center=b.state.pos,radius=b.shape.radius,fixed=not(b.dynamic),wire=b.shape.wire,color=b.shape.color,highlight=b.shape.highlight,material=b.material,mask=b.mask)
return addedBody
| ThomasSweijen/TPF | py/bodiesHandling.py | Python | gpl-2.0 | 8,305 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.rest.gen_data import a_provider as _a_provider
from cfme.rest.gen_data import vm as _vm
from cfme.utils import error
from cfme.utils.rest import assert_response, delete_resources_from_collection
from cfme.utils.wait import wait_for
pytestmark = [test_requirements.provision]
@pytest.fixture(scope="function")
def a_provider(request):
return _a_provider(request)
@pytest.fixture(scope="function")
def vm_name(request, a_provider, appliance):
return _vm(request, a_provider, appliance.rest_api)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_vm(request, vm_name, appliance, from_detail):
"""Tests edit VMs using REST API.
Testing BZ 1428250.
Metadata:
test_flag: rest
"""
vm = appliance.rest_api.collections.vms.get(name=vm_name)
request.addfinalizer(vm.action.delete)
new_description = 'Test REST VM {}'.format(fauxfactory.gen_alphanumeric(5))
payload = {'description': new_description}
if from_detail:
edited = vm.action.edit(**payload)
assert_response(appliance)
else:
payload.update(vm._ref_repr())
edited = appliance.rest_api.collections.vms.action.edit(payload)
assert_response(appliance)
edited = edited[0]
record, __ = wait_for(
lambda: appliance.rest_api.collections.vms.find_by(
description=new_description) or False,
num_sec=100,
delay=5,
)
vm.reload()
assert vm.description == edited.description == record[0].description
@pytest.mark.tier(3)
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_vm_from_detail(vm_name, appliance, method):
vm = appliance.rest_api.collections.vms.get(name=vm_name)
del_action = getattr(vm.action.delete, method.upper())
del_action()
assert_response(appliance)
wait_for(
lambda: not appliance.rest_api.collections.vms.find_by(name=vm_name), num_sec=300, delay=10)
with error.expected('ActiveRecord::RecordNotFound'):
del_action()
assert_response(appliance, http_status=404)
@pytest.mark.tier(3)
def test_delete_vm_from_collection(vm_name, appliance):
vm = appliance.rest_api.collections.vms.get(name=vm_name)
collection = appliance.rest_api.collections.vms
delete_resources_from_collection(collection, [vm], not_found=True, num_sec=300, delay=10)
| jkandasa/integration_tests | cfme/tests/infrastructure/test_vm_rest.py | Python | gpl-2.0 | 2,531 |
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <[email protected]>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
from horus.engine.driver.driver import Driver
from horus.engine.scan.ciclop_scan import CiclopScan
from horus.engine.scan.current_video import CurrentVideo
from horus.engine.calibration.pattern import Pattern
from horus.engine.calibration.calibration_data import CalibrationData
from horus.engine.calibration.camera_intrinsics import CameraIntrinsics
from horus.engine.calibration.autocheck import Autocheck
from horus.engine.calibration.laser_triangulation import LaserTriangulation
from horus.engine.calibration.platform_extrinsics import PlatformExtrinsics
from horus.engine.calibration.combo_calibration import ComboCalibration
from horus.engine.algorithms.image_capture import ImageCapture
from horus.engine.algorithms.image_detection import ImageDetection
from horus.engine.algorithms.laser_segmentation import LaserSegmentation
from horus.engine.algorithms.point_cloud_generation import PointCloudGeneration
from horus.engine.algorithms.point_cloud_roi import PointCloudROI
# Instances of engine modules
driver = Driver()
ciclop_scan = CiclopScan()
current_video = CurrentVideo()
pattern = Pattern()
calibration_data = CalibrationData()
camera_intrinsics = CameraIntrinsics()
scanner_autocheck = Autocheck()
laser_triangulation = LaserTriangulation()
platform_extrinsics = PlatformExtrinsics()
combo_calibration = ComboCalibration()
image_capture = ImageCapture()
image_detection = ImageDetection()
laser_segmentation = LaserSegmentation()
point_cloud_generation = PointCloudGeneration()
point_cloud_roi = PointCloudROI()
| bqlabs/horus | src/horus/gui/engine.py | Python | gpl-2.0 | 1,802 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import lrange
ALL_RESULTS = lrange(7)
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, CANCELLED = ALL_RESULTS
Results = ["success", "warnings", "failure", "skipped", "exception", "retry", "cancelled"]
def statusToString(status):
if status is None:
return "not finished"
if status < 0 or status >= len(Results):
return "Invalid status"
else:
return Results[status]
def worst_status(a, b):
# SKIPPED > SUCCESS > WARNINGS > FAILURE > EXCEPTION > RETRY > CANCELLED
# CANCELLED needs to be considered the worst.
for s in (CANCELLED, RETRY, EXCEPTION, FAILURE, WARNINGS, SUCCESS, SKIPPED):
if s in (a, b):
return s
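# For example (illustrative comment): worst_status(SUCCESS, FAILURE) == FAILURE
# and worst_status(SKIPPED, RETRY) == RETRY, since the tuple above is ordered
# from worst to best.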
def computeResultAndTermination(obj, result, previousResult):
possible_overall_result = result
terminate = False
if result == FAILURE:
if not obj.flunkOnFailure:
possible_overall_result = SUCCESS
if obj.warnOnFailure:
possible_overall_result = WARNINGS
if obj.flunkOnFailure:
possible_overall_result = FAILURE
if obj.haltOnFailure:
terminate = True
elif result == WARNINGS:
if not obj.warnOnWarnings:
possible_overall_result = SUCCESS
else:
possible_overall_result = WARNINGS
if obj.flunkOnWarnings:
possible_overall_result = FAILURE
elif result in (EXCEPTION, RETRY, CANCELLED):
terminate = True
result = worst_status(previousResult, possible_overall_result)
return result, terminate
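# Example (illustrative comment): with a previous overall result of SUCCESS and
# a step whose flags match the defaults in ResultComputingConfigMixin below
# (flunkOnFailure=True, haltOnFailure=False), a FAILURE step result yields
# (FAILURE, False); setting haltOnFailure=True makes it (FAILURE, True).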
class ResultComputingConfigMixin(object):
haltOnFailure = False
flunkOnWarnings = False
flunkOnFailure = True
warnOnWarnings = False
warnOnFailure = False
resultConfig = [
"haltOnFailure",
"flunkOnWarnings",
"flunkOnFailure",
"warnOnWarnings",
"warnOnFailure",
]
| Lekensteyn/buildbot | master/buildbot/process/results.py | Python | gpl-2.0 | 2,689 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
""" Product transfer management """
# pylint: enable=E1101
from decimal import Decimal
from kiwi.currency import currency
from storm.expr import Join, LeftJoin, Sum, Cast, Coalesce, And
from storm.info import ClassAlias
from storm.references import Reference
from zope.interface import implementer
from stoqlib.database.expr import NullIf
from stoqlib.database.properties import (DateTimeCol, IdCol, IdentifierCol,
IntCol, PriceCol, QuantityCol,
UnicodeCol, EnumCol)
from stoqlib.database.viewable import Viewable
from stoqlib.domain.base import Domain
from stoqlib.domain.fiscal import Invoice
from stoqlib.domain.product import ProductHistory, StockTransactionHistory
from stoqlib.domain.person import Person, Branch, Company
from stoqlib.domain.interfaces import IContainer, IInvoice, IInvoiceItem
from stoqlib.domain.sellable import Sellable
from stoqlib.domain.taxes import InvoiceItemIcms, InvoiceItemIpi
from stoqlib.lib.dateutils import localnow
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
@implementer(IInvoiceItem)
class TransferOrderItem(Domain):
"""Transfer order item
"""
__storm_table__ = 'transfer_order_item'
sellable_id = IdCol()
# FIXME: This should be a product, since it does not make sense to transfer
    # services
#: The |sellable| to transfer
sellable = Reference(sellable_id, 'Sellable.id')
batch_id = IdCol()
    #: If the sellable is a storable, the |batch| that was transferred
batch = Reference(batch_id, 'StorableBatch.id')
transfer_order_id = IdCol()
#: The |transfer| this item belongs to
transfer_order = Reference(transfer_order_id, 'TransferOrder.id')
#: The quantity to transfer
quantity = QuantityCol()
#: Average cost of the item in the source branch at the time of transfer.
stock_cost = PriceCol(default=0)
icms_info_id = IdCol()
#: the :class:`stoqlib.domain.taxes.InvoiceItemIcms` tax for *self*
icms_info = Reference(icms_info_id, 'InvoiceItemIcms.id')
ipi_info_id = IdCol()
#: the :class:`stoqlib.domain.taxes.InvoiceItemIpi` tax for *self*
ipi_info = Reference(ipi_info_id, 'InvoiceItemIpi.id')
item_discount = Decimal('0')
def __init__(self, store=None, **kwargs):
if not 'sellable' in kwargs:
raise TypeError('You must provide a sellable argument')
kwargs['ipi_info'] = InvoiceItemIpi(store=store)
kwargs['icms_info'] = InvoiceItemIcms(store=store)
super(TransferOrderItem, self).__init__(store=store, **kwargs)
product = self.sellable.product
if product:
self.ipi_info.set_item_tax(self)
self.icms_info.set_item_tax(self)
#
# IInvoiceItem implementation
#
@property
def parent(self):
return self.transfer_order
@property
def base_price(self):
return self.stock_cost
@property
def price(self):
return self.stock_cost
@property
def nfe_cfop_code(self):
source_branch = self.transfer_order.source_branch
source_address = source_branch.person.get_main_address()
destination_branch = self.transfer_order.destination_branch
destination_address = destination_branch.person.get_main_address()
same_state = True
if (source_address.city_location.state != destination_address.city_location.state):
same_state = False
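        # CFOP 5152 is the fiscal code for transferring goods within the same
        # state; the 6xxx series (6152 here) covers interstate operations
        # (assumed reading of the Brazilian CFOP tables).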
if same_state:
return u'5152'
else:
return u'6152'
#
# Public API
#
def get_total(self):
"""Returns the total cost of a transfer item eg quantity * cost"""
return self.quantity * self.sellable.cost
def send(self):
"""Sends this item to it's destination |branch|.
This method should never be used directly, and to send a transfer you
should use TransferOrder.send().
"""
product = self.sellable.product
if product.manage_stock:
storable = product.storable
storable.decrease_stock(self.quantity,
self.transfer_order.source_branch,
StockTransactionHistory.TYPE_TRANSFER_TO,
self.id, batch=self.batch)
ProductHistory.add_transfered_item(self.store,
self.transfer_order.source_branch,
self)
def receive(self):
"""Receives this item, increasing the quantity in the stock.
This method should never be used directly, and to receive a transfer
you should use TransferOrder.receive().
"""
product = self.sellable.product
if product.manage_stock:
storable = product.storable
storable.increase_stock(self.quantity,
self.transfer_order.destination_branch,
StockTransactionHistory.TYPE_TRANSFER_FROM,
self.id, unit_cost=self.stock_cost,
batch=self.batch)
@implementer(IContainer)
@implementer(IInvoice)
class TransferOrder(Domain):
""" Transfer Order class
"""
__storm_table__ = 'transfer_order'
STATUS_PENDING = u'pending'
STATUS_SENT = u'sent'
STATUS_RECEIVED = u'received'
statuses = {STATUS_PENDING: _(u'Pending'),
STATUS_SENT: _(u'Sent'),
STATUS_RECEIVED: _(u'Received')}
status = EnumCol(default=STATUS_PENDING)
#: A numeric identifier for this object. This value should be used instead
#: of :obj:`Domain.id` when displaying a numerical representation of this
#: object to the user, in dialogs, lists, reports and such.
identifier = IdentifierCol()
#: The date the order was created
open_date = DateTimeCol(default_factory=localnow)
#: The date the order was received
receival_date = DateTimeCol()
#: The invoice number of the transfer
invoice_number = IntCol()
#: Comments of a transfer
comments = UnicodeCol()
source_branch_id = IdCol()
#: The |branch| sending the stock
source_branch = Reference(source_branch_id, 'Branch.id')
destination_branch_id = IdCol()
#: The |branch| receiving the stock
destination_branch = Reference(destination_branch_id, 'Branch.id')
source_responsible_id = IdCol()
#: The |employee| responsible for the |transfer| at source |branch|
source_responsible = Reference(source_responsible_id, 'Employee.id')
destination_responsible_id = IdCol()
#: The |employee| responsible for the |transfer| at destination |branch|
destination_responsible = Reference(destination_responsible_id,
'Employee.id')
#: |payments| generated by this transfer
payments = None
#: |transporter| used in transfer
transporter = None
invoice_id = IdCol()
#: The |invoice| generated by the transfer
invoice = Reference(invoice_id, 'Invoice.id')
def __init__(self, store=None, **kwargs):
kwargs['invoice'] = Invoice(store=store, invoice_type=Invoice.TYPE_OUT)
super(TransferOrder, self).__init__(store=store, **kwargs)
#
# IContainer implementation
#
def get_items(self):
return self.store.find(TransferOrderItem, transfer_order=self)
def add_item(self, item):
assert self.status == self.STATUS_PENDING
item.transfer_order = self
def remove_item(self, item):
if item.transfer_order is not self:
raise ValueError(_('The item does not belong to this '
'transfer order'))
item.transfer_order = None
self.store.maybe_remove(item)
#
# IInvoice implementation
#
@property
def discount_value(self):
return currency(0)
@property
def invoice_subtotal(self):
subtotal = self.get_items().sum(TransferOrderItem.quantity *
TransferOrderItem.stock_cost)
return currency(subtotal)
@property
def invoice_total(self):
return self.invoice_subtotal
@property
def recipient(self):
return self.destination_branch.person
@property
def operation_nature(self):
# TODO: Save the operation nature in new transfer_order table field
return _(u"Transfer")
#
# Public API
#
@property
def branch(self):
return self.source_branch
@property
def status_str(self):
        return self.statuses[self.status]
def add_sellable(self, sellable, batch, quantity=1, cost=None):
"""Add the given |sellable| to this |transfer|.
:param sellable: The |sellable| we are transfering
:param batch: What |batch| of the storable (represented by sellable) we
are transfering.
:param quantity: The quantity of this product that is being transfered.
"""
assert self.status == self.STATUS_PENDING
self.validate_batch(batch, sellable=sellable)
product = sellable.product
if product.manage_stock:
stock_item = product.storable.get_stock_item(
self.source_branch, batch)
stock_cost = stock_item.stock_cost
else:
stock_cost = sellable.cost
return TransferOrderItem(store=self.store,
transfer_order=self,
sellable=sellable,
batch=batch,
quantity=quantity,
stock_cost=cost or stock_cost)
def can_send(self):
return (self.status == self.STATUS_PENDING and
self.get_items().count() > 0)
def can_receive(self):
return self.status == self.STATUS_SENT
def send(self):
"""Sends a transfer order to the destination branch.
"""
assert self.can_send()
for item in self.get_items():
item.send()
# Save invoice number, operation_nature and branch in Invoice table.
self.invoice.invoice_number = self.invoice_number
self.invoice.operation_nature = self.operation_nature
self.invoice.branch = self.branch
self.status = self.STATUS_SENT
def receive(self, responsible, receival_date=None):
"""Confirms the receiving of the transfer order.
"""
assert self.can_receive()
for item in self.get_items():
item.receive()
self.receival_date = receival_date or localnow()
self.destination_responsible = responsible
self.status = self.STATUS_RECEIVED
@classmethod
def get_pending_transfers(cls, store, branch):
"""Get all the transfers that need to be recieved
Get all transfers that have STATUS_SENT and the current branch as the destination
This is useful if you want to list all the items that need to be
recieved in a certain branch
"""
return store.find(cls, And(cls.status == cls.STATUS_SENT,
cls.destination_branch == branch))
def get_source_branch_name(self):
"""Returns the source |branch| name"""
return self.source_branch.get_description()
def get_destination_branch_name(self):
"""Returns the destination |branch| name"""
return self.destination_branch.get_description()
def get_source_responsible_name(self):
"""Returns the name of the |employee| responsible for the transfer
at source |branch|
"""
return self.source_responsible.person.name
def get_destination_responsible_name(self):
"""Returns the name of the |employee| responsible for the transfer
at destination |branch|
"""
if not self.destination_responsible:
return u''
return self.destination_responsible.person.name
def get_total_items_transfer(self):
"""Retuns the |transferitems| quantity
"""
return sum([item.quantity for item in self.get_items()], 0)
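# NOTE: The function below is an illustrative sketch added for documentation
# purposes and is not part of the original Stoq API. It only strings together
# methods defined above (add_sellable, can_send, send, can_receive, receive);
# ``store``, the branches, the employees and the sellable are assumed to be
# provided by the surrounding application.
def _example_transfer_workflow(store, source_branch, destination_branch,
                               source_employee, destination_employee,
                               sellable, batch=None):
    """Sketch of the expected life cycle: pending -> sent -> received."""
    order = TransferOrder(store=store,
                          source_branch=source_branch,
                          destination_branch=destination_branch,
                          source_responsible=source_employee)
    # add_sellable() snapshots the stock cost at the source branch and
    # creates the TransferOrderItem for us.
    order.add_sellable(sellable, batch=batch, quantity=2)
    if order.can_send():
        # send() decreases the stock at the source branch for every item.
        order.send()
    if order.can_receive():
        # receive() increases the stock at the destination branch.
        order.receive(destination_employee)
    return order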
class BaseTransferView(Viewable):
BranchDest = ClassAlias(Branch, 'branch_dest')
PersonDest = ClassAlias(Person, 'person_dest')
CompanyDest = ClassAlias(Company, 'company_dest')
transfer_order = TransferOrder
identifier = TransferOrder.identifier
identifier_str = Cast(TransferOrder.identifier, 'text')
status = TransferOrder.status
open_date = TransferOrder.open_date
receival_date = TransferOrder.receival_date
source_branch_id = TransferOrder.source_branch_id
destination_branch_id = TransferOrder.destination_branch_id
source_branch_name = Coalesce(NullIf(Company.fancy_name, u''), Person.name)
destination_branch_name = Coalesce(NullIf(CompanyDest.fancy_name, u''),
PersonDest.name)
group_by = [TransferOrder, source_branch_name, destination_branch_name]
tables = [
TransferOrder,
Join(TransferOrderItem,
TransferOrder.id == TransferOrderItem.transfer_order_id),
# Source
LeftJoin(Branch, TransferOrder.source_branch_id == Branch.id),
LeftJoin(Person, Branch.person_id == Person.id),
LeftJoin(Company, Company.person_id == Person.id),
# Destination
LeftJoin(BranchDest, TransferOrder.destination_branch_id == BranchDest.id),
LeftJoin(PersonDest, BranchDest.person_id == PersonDest.id),
LeftJoin(CompanyDest, CompanyDest.person_id == PersonDest.id),
]
@property
def branch(self):
# We need this property for the acronym to appear in the identifier
return self.store.get(Branch, self.source_branch_id)
class TransferOrderView(BaseTransferView):
id = TransferOrder.id
# Aggregates
total_items = Sum(TransferOrderItem.quantity)
class TransferItemView(BaseTransferView):
id = TransferOrderItem.id
item_quantity = TransferOrderItem.quantity
item_description = Sellable.description
group_by = BaseTransferView.group_by[:]
group_by.extend([TransferOrderItem, Sellable])
tables = BaseTransferView.tables[:]
tables.append(Join(Sellable, Sellable.id == TransferOrderItem.sellable_id))
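# NOTE: Illustrative sketch, not part of the original module. It shows how the
# summary viewable above is typically consumed; ``store`` is assumed to be an
# open database store from the surrounding application.
def _example_transfer_listing(store):
    """Sketch: build simple listing rows from TransferOrderView."""
    for view in store.find(TransferOrderView):
        yield (view.identifier_str, view.source_branch_name,
               view.destination_branch_name, view.total_items)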
| andrebellafronte/stoq | stoqlib/domain/transfer.py | Python | gpl-2.0 | 15,361 |
#! /usr/bin/env python
#
# Copyright (C) 2015 Open Information Security Foundation
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# This script generates DNP3 related source code based on definitions
# of DNP3 objects (currently the object structs).
from __future__ import print_function
import sys
import re
from cStringIO import StringIO
import yaml
import types
import jinja2
IN_PLACE_START = "/* START GENERATED CODE */"
IN_PLACE_END = "/* END GENERATED CODE */"
util_lua_dnp3_objects_c_template = """/* Copyright (C) 2015 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/**
* DO NOT EDIT. THIS FILE IS AUTO-GENERATED.
*
* Generated by command:
* {{command_line}}
*/
#include "suricata-common.h"
#include "app-layer-dnp3.h"
#include "app-layer-dnp3-objects.h"
#ifdef HAVE_LUA
#include <lua.h>
#include <lualib.h>
#include <lauxlib.h>
#include "util-lua.h"
#include "util-lua-dnp3-objects.h"
/**
* \\brief Push an object point item onto the stack.
*/
void DNP3PushPoint(lua_State *luastate, DNP3Object *object,
DNP3Point *point)
{
switch (DNP3_OBJECT_CODE(object->group, object->variation)) {
{% for object in objects %}
case DNP3_OBJECT_CODE({{object.group}}, {{object.variation}}): {
DNP3ObjectG{{object.group}}V{{object.variation}} *data = point->data;
{% for field in object.fields %}
{% if is_integer_type(field.type) %}
lua_pushliteral(luastate, "{{field.name}}");
lua_pushinteger(luastate, data->{{field.name}});
lua_settable(luastate, -3);
{% elif field["type"] in ["flt32", "flt64"] %}
lua_pushliteral(luastate, "{{field.name}}");
lua_pushnumber(luastate, data->{{field.name}});
lua_settable(luastate, -3);
{% elif field["type"] == "chararray" %}
lua_pushliteral(luastate, "{{field.name}}");
LuaPushStringBuffer(luastate, (uint8_t *)data->{{field.name}},
strlen(data->{{field.name}}));
lua_settable(luastate, -3);
{% elif field["type"] == "vstr4" %}
lua_pushliteral(luastate, "{{field.name}}");
LuaPushStringBuffer(luastate, (uint8_t *)data->{{field.name}},
strlen(data->{{field.name}}));
lua_settable(luastate, -3);
{% elif field.type == "bytearray" %}
lua_pushliteral(luastate, "{{field.name}}");
lua_pushlstring(luastate, (const char *)data->{{field.name}},
data->{{field.len_field}});
lua_settable(luastate, -3);
{% elif field.type == "bstr8" %}
{% for field in field.fields %}
lua_pushliteral(luastate, "{{field.name}}");
lua_pushinteger(luastate, data->{{field.name}});
lua_settable(luastate, -3);
{% endfor %}
{% else %}
{{ raise("Unhandled datatype: %s" % (field.type)) }}
{% endif %}
{% endfor %}
break;
}
{% endfor %}
default:
break;
}
}
#endif /* HAVE_LUA */
"""
output_json_dnp3_objects_template = """/* Copyright (C) 2015 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/**
* DO NOT EDIT. THIS FILE IS AUTO-GENERATED.
*
* Generated by command:
* {{command_line}}
*/
#include "suricata-common.h"
#include "util-crypt.h"
#include "app-layer-dnp3.h"
#include "app-layer-dnp3-objects.h"
#include "output-json-dnp3-objects.h"
#ifdef HAVE_LIBJANSSON
void OutputJsonDNP3SetItem(json_t *js, DNP3Object *object,
DNP3Point *point)
{
switch (DNP3_OBJECT_CODE(object->group, object->variation)) {
{% for object in objects %}
case DNP3_OBJECT_CODE({{object.group}}, {{object.variation}}): {
DNP3ObjectG{{object.group}}V{{object.variation}} *data = point->data;
{% for field in object.fields %}
{% if is_integer_type(field.type) %}
json_object_set_new(js, "{{field.name}}",
json_integer(data->{{field.name}}));
{% elif field.type in ["flt32", "flt64"] %}
json_object_set_new(js, "{{field.name}}",
json_real(data->{{field.name}}));
{% elif field.type == "bytearray" %}
unsigned long {{field.name}}_b64_len = data->{{field.len_field}} * 2;
uint8_t {{field.name}}_b64[{{field.name}}_b64_len];
Base64Encode(data->{{field.name}}, data->{{field.len_field}},
{{field.name}}_b64, &{{field.name}}_b64_len);
json_object_set_new(js, "data->{{field.name}}",
json_string((char *){{field.name}}_b64));
{% elif field.type == "vstr4" %}
json_object_set_new(js, "data->{{field.name}}", json_string(data->{{field.name}}));
{% elif field.type == "chararray" %}
if (data->{{field.len_field}} > 0) {
/* First create a null terminated string as not all versions
* of jansson have json_stringn. */
char tmpbuf[data->{{field.len_field}} + 1];
memcpy(tmpbuf, data->{{field.name}}, data->{{field.len_field}});
tmpbuf[data->{{field.len_field}}] = '\\0';
json_object_set_new(js, "{{field.name}}", json_string(tmpbuf));
} else {
json_object_set_new(js, "{{field.name}}", json_string(""));
}
{% elif field.type == "bstr8" %}
{% for field in field.fields %}
json_object_set_new(js, "{{field.name}}",
json_integer(data->{{field.name}}));
{% endfor %}
{% else %}
{{ raise("Unhandled datatype: %s" % (field.type)) }}
{% endif %}
{% endfor %}
break;
}
{% endfor %}
default:
SCLogDebug("Unknown object: %d:%d", object->group,
object->variation);
break;
}
}
#endif /* HAVE_LIBJANSSON */
"""
def has_freeable_types(fields):
freeable_types = [
"bytearray",
]
for field in fields:
if field["type"] in freeable_types:
return True
return False
def is_integer_type(datatype):
integer_types = [
"uint64",
"uint32",
"uint24",
"uint16",
"uint8",
"int64",
"int32",
"int16",
"int8",
"dnp3time",
]
return datatype in integer_types
def to_type(datatype):
type_map = {
"uint8": "uint8_t",
}
if datatype in type_map:
return type_map[datatype]
else:
raise Exception("Unknown datatype: %s" % (datatype))
def generate(template, filename, context):
print("Generating %s." % (filename))
try:
env = jinja2.Environment(trim_blocks=True)
output = env.from_string(template).render(context)
with open(filename, "w") as fileobj:
fileobj.write(output)
except Exception as err:
print("Failed to generate %s: %s" % (filename, err))
sys.exit(1)
def raise_helper(msg):
raise Exception(msg)
def gen_object_structs(context):
""" Generate structs for all the define DNP3 objects. """
template = """
/* Code generated by:
* {{command_line}}
*/
{% for object in objects %}
typedef struct DNP3ObjectG{{object.group}}V{{object.variation}}_ {
{% for field in object.fields %}
{% if field.type == "bstr8" %}
{% for field in field.fields %}
uint8_t {{field.name}}:{{field.width}};
{% endfor %}
{% else %}
{% if field.type == "int16" %}
int16_t {{field.name}};
{% elif field.type == "int32" %}
int32_t {{field.name}};
{% elif field.type == "uint8" %}
uint8_t {{field.name}};
{% elif field.type == "uint16" %}
uint16_t {{field.name}};
{% elif field.type == "uint24" %}
uint32_t {{field.name}};
{% elif field.type == "uint32" %}
uint32_t {{field.name}};
{% elif field.type == "uint64" %}
uint64_t {{field.name}};
{% elif field.type == "flt32" %}
float {{field.name}};
{% elif field.type == "flt64" %}
double {{field.name}};
{% elif field.type == "dnp3time" %}
uint64_t {{field.name}};
{% elif field.type == "bytearray" %}
uint8_t *{{field.name}};
{% elif field.type == "vstr4" %}
char {{field.name}}[5];
{% elif field.type == "chararray" %}
char {{field.name}}[{{field.size}}];
{% else %}
{{ raise("Unknown datatype type '%s' for object %d:%d" % (
field.type, object.group, object.variation)) }}
{% endif %}
{% endif %}
{% endfor %}
{% if object.extra_fields %}
{% for field in object.extra_fields %}
{% if field.type == "uint8" %}
uint8_t {{field.name}};
{% elif field.type == "uint16" %}
uint16_t {{field.name}};
{% elif field.type == "uint32" %}
uint32_t {{field.name}};
{% else %}
{{ raise("Unknown datatype: %s" % (field.type)) }}
{% endif %}
{% endfor %}
{% endif %}
} DNP3ObjectG{{object.group}}V{{object.variation}};
{% endfor %}
"""
filename = "src/app-layer-dnp3-objects.h"
try:
env = jinja2.Environment(trim_blocks=True)
code = env.from_string(template).render(context)
content = open(filename).read()
content = re.sub(
"(%s).*(%s)" % (re.escape(IN_PLACE_START), re.escape(IN_PLACE_END)),
r"\1%s\2" % (code), content, 1, re.M | re.DOTALL)
open(filename, "w").write(content)
print("Updated %s." % (filename))
except Exception as err:
print("Failed to update %s: %s" % (filename, err), file=sys.stderr)
sys.exit(1)
def gen_object_decoders(context):
""" Generate decoders for all defined DNP3 objects. """
template = """
/* Code generated by:
* {{command_line}}
*/
{% for object in objects %}
{% if object.packed %}
static int DNP3DecodeObjectG{{object.group}}V{{object.variation}}(const uint8_t **buf, uint32_t *len,
uint8_t prefix_code, uint32_t start, uint32_t count,
DNP3PointList *points)
{
DNP3ObjectG{{object.group}}V{{object.variation}} *object = NULL;
int bytes = (count / 8) + 1;
uint32_t prefix = 0;
int point_index = start;
if (!DNP3ReadPrefix(buf, len, prefix_code, &prefix)) {
goto error;
}
for (int i = 0; i < bytes; i++) {
uint8_t octet;
if (!DNP3ReadUint8(buf, len, &octet)) {
goto error;
}
for (int j = 0; j < 8 && count; j = j + {{object.fields[0].width}}) {
object = SCCalloc(1, sizeof(*object));
if (unlikely(object == NULL)) {
goto error;
}
{% if object.fields[0].width == 1 %}
object->{{object.fields[0].name}} = (octet >> j) & 0x1;
{% elif object.fields[0].width == 2 %}
object->{{object.fields[0].name}} = (octet >> j) & 0x3;
{% else %}
#error "Unhandled field width: {{object.fields[0].width}}"
{% endif %}
if (!DNP3AddPoint(points, object, point_index, prefix_code, prefix)) {
goto error;
}
object = NULL;
count--;
point_index++;
}
}
return 1;
error:
if (object != NULL) {
SCFree(object);
}
return 0;
}
{% else %}
static int DNP3DecodeObjectG{{object.group}}V{{object.variation}}(const uint8_t **buf, uint32_t *len,
uint8_t prefix_code, uint32_t start, uint32_t count,
DNP3PointList *points)
{
DNP3ObjectG{{object.group}}V{{object.variation}} *object = NULL;
uint32_t prefix = 0;
uint32_t point_index = start;
{% if object._track_offset %}
uint32_t offset;
{% endif %}
{% if object.constraints %}
{% for (key, val) in object.constraints.items() %}
{% if key == "require_size_prefix" %}
if (!DNP3PrefixIsSize(prefix_code)) {
goto error;
}
{% elif key == "require_prefix_code" %}
if (prefix_code != {{val}}) {
goto error;
}
{% else %}
{{ raise("Unhandled constraint: %s" % (key)) }}
{% endif %}
{% endfor %}
{% endif %}
while (count--) {
object = SCCalloc(1, sizeof(*object));
if (unlikely(object == NULL)) {
goto error;
}
if (!DNP3ReadPrefix(buf, len, prefix_code, &prefix)) {
goto error;
}
{% if object._track_offset %}
offset = *len;
{% endif %}
{% for field in object.fields %}
{% if field.type == "int16" %}
if (!DNP3ReadUint16(buf, len, (uint16_t *)&object->{{field.name}})) {
goto error;
}
{% elif field.type == "int32" %}
if (!DNP3ReadUint32(buf, len, (uint32_t *)&object->{{field.name}})) {
goto error;
}
{% elif field.type == "uint8" %}
if (!DNP3ReadUint8(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "uint16" %}
if (!DNP3ReadUint16(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "uint24" %}
if (!DNP3ReadUint24(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "uint32" %}
if (!DNP3ReadUint32(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "uint64" %}
if (!DNP3ReadUint64(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "flt32" %}
if (!DNP3ReadFloat32(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "flt64" %}
if (!DNP3ReadFloat64(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "dnp3time" %}
if (!DNP3ReadUint48(buf, len, &object->{{field.name}})) {
goto error;
}
{% elif field.type == "vstr4" %}
if (*len < 4) {
goto error;
}
memcpy(object->{{field.name}}, *buf, 4);
object->{{field.name}}[4] = '\\\\0';
*buf += 4;
*len -= 4;
{% elif field.type == "bytearray" %}
{% if field.len_from_prefix %}
object->{{field.len_field}} = prefix - (offset - *len);
{% endif %}
if (object->{{field.len_field}} > 0) {
if (*len < object->{{field.len_field}}) {
/* Not enough data. */
goto error;
}
object->{{field.name}} = SCCalloc(1, object->{{field.len_field}});
if (unlikely(object->{{field.name}} == NULL)) {
goto error;
}
memcpy(object->{{field.name}}, *buf, object->{{field.len_field}});
*buf += object->{{field.len_field}};
*len -= object->{{field.len_field}};
}
{% elif field.type == "chararray" %}
{% if field.len_from_prefix %}
object->{{field.len_field}} = prefix - (offset - *len);
{% endif %}
if (object->{{field.len_field}} > 0) {
if (*len < object->{{field.len_field}}) {
/* Not enough data. */
goto error;
}
memcpy(object->{{field.name}}, *buf, object->{{field.len_field}});
*buf += object->{{field.len_field}};
*len -= object->{{field.len_field}};
}
object->{{field.name}}[object->{{field.len_field}}] = '\\\\0';
{% elif field.type == "bstr8" %}
{
uint8_t octet;
if (!DNP3ReadUint8(buf, len, &octet)) {
goto error;
}
{% set ns = namespace(shift=0) %}
{% for field in field.fields %}
{% if field.width == 1 %}
object->{{field.name}} = (octet >> {{ns.shift}}) & 0x1;
{% elif field.width == 2 %}
object->{{field.name}} = (octet >> {{ns.shift}}) & 0x3;
{% elif field.width == 4 %}
object->{{field.name}} = (octet >> {{ns.shift}}) & 0xf;
{% elif field.width == 7 %}
object->{{field.name}} = (octet >> {{ns.shift}}) & 0x7f;
{% else %}
{{ raise("Unhandled width of %d." % (field.width)) }}
{% endif %}
{% set ns.shift = ns.shift + field.width %}
{% endfor %}
}
{% else %}
{{ raise("Unhandled datatype '%s' for object %d:%d." % (field.type,
object.group, object.variation)) }}
{% endif %}
{% endfor %}
if (!DNP3AddPoint(points, object, point_index, prefix_code, prefix)) {
goto error;
}
object = NULL;
point_index++;
}
return 1;
error:
if (object != NULL) {
SCFree(object);
}
return 0;
}
{% endif %}
{% endfor %}
void DNP3FreeObjectPoint(int group, int variation, void *point)
{
switch(DNP3_OBJECT_CODE(group, variation)) {
{% for object in objects %}
{% if f_has_freeable_types(object.fields) %}
case DNP3_OBJECT_CODE({{object.group}}, {{object.variation}}): {
DNP3ObjectG{{object.group}}V{{object.variation}} *object = (DNP3ObjectG{{object.group}}V{{object.variation}} *) point;
{% for field in object.fields %}
{% if field.type == "bytearray" %}
if (object->{{field.name}} != NULL) {
SCFree(object->{{field.name}});
}
{% endif %}
{% endfor %}
break;
}
{% endif %}
{% endfor %}
default:
break;
}
SCFree(point);
}
/**
* \\\\brief Decode a DNP3 object.
*
* \\\\retval 0 on success. On failure a positive integer corresponding
* to a DNP3 application layer event will be returned.
*/
int DNP3DecodeObject(int group, int variation, const uint8_t **buf,
uint32_t *len, uint8_t prefix_code, uint32_t start,
uint32_t count, DNP3PointList *points)
{
int rc = 0;
switch (DNP3_OBJECT_CODE(group, variation)) {
{% for object in objects %}
case DNP3_OBJECT_CODE({{object.group}}, {{object.variation}}):
rc = DNP3DecodeObjectG{{object.group}}V{{object.variation}}(buf, len, prefix_code, start, count,
points);
break;
{% endfor %}
default:
return DNP3_DECODER_EVENT_UNKNOWN_OBJECT;
}
return rc ? 0 : DNP3_DECODER_EVENT_MALFORMED;
}
"""
try:
filename = "src/app-layer-dnp3-objects.c"
env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True)
code = env.from_string(template).render(context)
content = open(filename).read()
content = re.sub(
"(%s).*(%s)" % (re.escape(IN_PLACE_START), re.escape(IN_PLACE_END)),
r"\1%s\n\2" % (code), content, 1, re.M | re.DOTALL)
open(filename, "w").write(content)
print("Updated %s." % (filename))
except Exception as err:
print("Failed to update %s: %s" % (filename, err), file=sys.stderr)
sys.exit(1)
def preprocess_object(obj):
valid_keys = [
"group",
"variation",
"constraints",
"extra_fields",
"fields",
"packed",
]
valid_field_keys = [
"type",
"name",
"width",
"len_from_prefix",
"len_field",
"fields",
"size",
]
if "unimplemented" in obj:
print("Object not implemented: %s:%s: %s" % (
str(obj["group"]), str(obj["variation"]), obj["unimplemented"]))
return None
for key, val in obj.items():
if key not in valid_keys:
print("Invalid key '%s' in object %d:%d" % (
key, obj["group"], obj["variation"]), file=sys.stderr)
sys.exit(1)
for field in obj["fields"]:
for key in field.keys():
if key not in valid_field_keys:
print("Invalid key '%s' in object %d:%d" % (
key, obj["group"], obj["variation"]), file=sys.stderr)
sys.exit(1)
if "len_from_prefix" in field and field["len_from_prefix"]:
obj["_track_offset"] = True
break
if field["type"] == "bstr8":
width = 0
for subfield in field["fields"]:
width += int(subfield["width"])
assert(width == 8)
return obj
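# NOTE: Illustrative sketch, not part of the original script. A definition
# that passes preprocess_object() above might look roughly like the dict
# below; the real definitions live in scripts/dnp3-gen/dnp3-objects.yaml and
# the group/variation/field names used here are hypothetical.
_EXAMPLE_OBJECT_DEFINITION = {
    "group": 30,
    "variation": 1,
    "fields": [
        # A packed flag octet: the sub-field widths must sum to 8 (checked
        # by the assert in preprocess_object above).
        {"name": "flags", "type": "bstr8", "fields": [
            {"name": "online", "width": 1},
            {"name": "restart", "width": 1},
            {"name": "comm_lost", "width": 1},
            {"name": "remote_forced", "width": 1},
            {"name": "local_forced", "width": 1},
            {"name": "over_range", "width": 1},
            {"name": "reference_err", "width": 1},
            {"name": "reserved0", "width": 1},
        ]},
        {"name": "value", "type": "int32"},
    ],
}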
def main():
# Require Jinja2 2.10 or greater.
jv = jinja2.__version__.split(".")
if int(jv[0]) < 2 or (int(jv[0]) == 2 and int(jv[1]) < 10):
print("error: jinja2 v2.10 or great required")
return 1
definitions = yaml.load(open("scripts/dnp3-gen/dnp3-objects.yaml"))
print("Loaded %s objects." % (len(definitions["objects"])))
definitions["objects"] = map(preprocess_object, definitions["objects"])
# Filter out unimplemented objects.
definitions["objects"] = [
        obj for obj in definitions["objects"] if obj is not None]
context = {
"raise": raise_helper,
"objects": definitions["objects"],
"is_integer_type": is_integer_type,
"f_to_type": to_type,
"f_has_freeable_types": has_freeable_types,
"command_line": " ".join(sys.argv),
}
gen_object_structs(context)
gen_object_decoders(context)
generate(util_lua_dnp3_objects_c_template,
"src/util-lua-dnp3-objects.c",
context)
generate(output_json_dnp3_objects_template,
"src/output-json-dnp3-objects.c",
context)
if __name__ == "__main__":
sys.exit(main())
| daviddiallo/suricata | scripts/dnp3-gen/dnp3-gen.py | Python | gpl-2.0 | 22,538 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
CreateWorkspace.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import *
from GeoServerToolsAlgorithm import \
GeoServerToolsAlgorithm
from processing.parameters.ParameterString import ParameterString
from processing.outputs.OutputString import OutputString
class CreateWorkspace(GeoServerToolsAlgorithm):
WORKSPACE = 'WORKSPACE'
WORKSPACEURI = 'WORKSPACEURI'
def processAlgorithm(self, progress):
self.createCatalog()
workspaceName = self.getParameterValue(self.WORKSPACE)
workspaceUri = self.getParameterValue(self.WORKSPACEURI)
self.catalog.create_workspace(workspaceName, workspaceUri)
def defineCharacteristics(self):
self.addBaseParameters()
self.name = 'Create workspace'
self.group = 'GeoServer management tools'
self.addParameter(ParameterString(self.WORKSPACE, 'Workspace'))
self.addParameter(ParameterString(self.WORKSPACEURI, 'Workspace URI'))
self.addOutput(OutputString(self.WORKSPACE, 'Workspace'))
| mhugent/Quantum-GIS | python/plugins/processing/algs/admintools/CreateWorkspace.py | Python | gpl-2.0 | 2,079 |
"""
These functions are for BOSSANOVA (BOss Survey of Satellites Around Nearby
Optically obserVable milky way Analogs).
"""
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
import targeting
def count_targets(hsts, verbose=True, remove_cached=True, rvir=300, targetingkwargs={}):
"""
Generates a count of targets for each field.
Parameters
----------
hsts
A list of `NSAHost` objects
verbose : bool
Whether or not to print a message when each host is examined
remove_cached : bool
Whether or not to remove the cached sdss catalog for each host
after counting. This may be necessary to prevent running out of
memory, depending on the number of hosts involved.
rvir : float
"virial radius" in kpc for the arcmin transform
    targetingkwargs : dict or list of dicts
        Passed into `targeting.select_targets`. If a single dictionary, one
        target count is produced per host; if a list of dictionaries, the
        targeting is run once per entry and a separate ``ntarg_*`` count
        column is produced for each.
Returns
-------
ntargs : astropy.Table
a table object with the names of the hosts and the target counts.
"""
import sys
import collections
from astropy import table
if isinstance(targetingkwargs, collections.Mapping):
colnames = ['ntarg']
targetingkwargs = [targetingkwargs.copy()]
else:
colnames = [('ntarg_' + t.get('colname', str(i))) for i, t in enumerate(targetingkwargs)]
targetingkwargs = [t.copy() for t in targetingkwargs]
for t in targetingkwargs:
t.setdefault('outercutrad', 300)
t.setdefault('removegama', False)
if 'colname' in t:
del t['colname']
nms = []
dists = []
rvs = []
cnts = [[] for t in targetingkwargs]
for i, h in enumerate(hsts):
if verbose:
print 'Generating target count for', h.name, '#', i + 1, 'of', len(hsts)
sys.stdout.flush()
nms.append(h.name)
dists.append(h.distmpc)
        rvs.append(h.physical_to_projected(rvir))
for j, t in enumerate(targetingkwargs):
if verbose:
print 'Targeting parameters:', t
sys.stdout.flush()
tcat = targeting.select_targets(h, **t)
cnts[j].append(len(tcat))
if remove_cached:
h._cached_sdss = None
t = table.Table()
t.add_column(table.Column(name='name', data=nms))
t.add_column(table.Column(name='distmpc', data=dists, units='Mpc'))
t.add_column(table.Column(name='rvirarcmin', data=rvs, units='arcmin'))
for cnm, cnt in zip(colnames, cnts):
t.add_column(table.Column(name=cnm, data=cnt))
return t
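# NOTE: Illustrative sketch, not part of the original module. ``hosts`` is
# assumed to be a list of NSAHost objects; the column labels 'bright'/'faint'
# are hypothetical and only name the resulting ntarg_* columns.
def _example_count_targets(hosts):
    """Sketch: count targets per host for two different magnitude limits."""
    kwargs = [{'faintlimit': 20.0, 'colname': 'bright'},
              {'faintlimit': 21.0, 'colname': 'faint'}]
    return count_targets(hosts, verbose=False, targetingkwargs=kwargs)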
_Vabs_mw_sats = {'Bootes I': -6.3099999999999987,
'Bootes II': -2.7000000000000011,
'Bootes III': -5.7500000000000018,
'Canes Venatici I': -8.5900000000000016,
'Canes Venatici II': -4.9199999999999982,
'Canis Major': -14.389999999999999,
'Carina': -9.1099999999999994,
'Coma Berenices': -4.0999999999999996,
'Draco': -8.7999999999999989,
'Fornax': -13.44,
'Hercules': -6.6000000000000014,
'LMC': -18.120000000000001,
'Leo I': -12.02,
'Leo II': -9.8399999999999999,
'Leo IV': -5.8400000000000016,
'Leo V': -5.25,
'Pisces II': -5.0,
'SMC': -16.830000000000002,
'Sagittarius dSph': -13.500000000000002,
'Sculptor': -11.070000000000002,
'Segue I': -1.5,
'Segue II': -2.5,
'Sextans I': -9.2700000000000014,
'Ursa Major I': -5.5299999999999994,
'Ursa Major II': -4.1999999999999993,
'Ursa Minor': -8.7999999999999989,
'Willman 1': -2.6999999999999993}
#now just assume they are all g-r=0.5, ~right for Draco ... Apply Jester+ transforms
_rabs_mw_sats = dict([(k, v + (-0.41 * (0.5) + 0.01)) for k, v in _Vabs_mw_sats.iteritems()])
_sorted_mw_rabs = np.sort(_rabs_mw_sats.values())
def count_mw_sats(h, maglim, mwsatsrmags=_sorted_mw_rabs):
appmags = mwsatsrmags + h.distmod
return np.sum(appmags < maglim)
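# NOTE (added comment): count_mw_sats() just shifts the absolute r magnitudes
# of the known Milky Way satellites to the host's distance (m = M + distmod)
# and counts how many land brighter than the survey limit. For a host at
# ~20 Mpc (distmod ~ 31.5), a satellite with M_r ~ -12 would appear at
# r ~ 19.5 and be counted for maglim=20.5, while one with M_r ~ -8
# (r ~ 23.5) would not.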
def generate_count_table(hsts, fnout=None, maglims=[21, 20.5, 20], outercutrad=-90,remove_cached=True):
from astropy.io import ascii
from astropy import table
targetingkwargs = []
for m in maglims:
targetingkwargs.append({'faintlimit': m, 'outercutrad': outercutrad, 'colname': str(m)})
tab = count_targets(hsts, targetingkwargs=targetingkwargs, remove_cached=remove_cached)
for m in maglims:
satcnt = []
for hs in hsts:
satcnt.append(count_mw_sats(hs, m))
tab.add_column(table.Column(name='nsat_' + str(m), data=satcnt))
for m in maglims:
nsatstr = 'nsat_' + str(m)
ntargstr = 'ntarg_' + str(m)
tab.add_column(table.Column(name='ntargpersat_' + str(m), data=tab[ntargstr] / tab[nsatstr]))
if fnout:
ascii.write(tab, fnout)
return tab
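# NOTE: Illustrative sketch, not part of the original module. ``hosts`` is
# assumed to come from the host-selection machinery elsewhere in the survey
# code; the output filename is arbitrary.
def _example_generate_counts(hosts):
    """Sketch: write target/satellite counts for the default magnitude limits."""
    return generate_count_table(hosts, fnout='bossanova_counts.dat',
                                maglims=[21, 20.5, 20])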
| saga-survey/saga-code | bossanova/bossanova.py | Python | gpl-2.0 | 4,779 |
#!/usr/bin/env python
# coding:utf-8
import hashlib
import logging
from x1category import X1Category
TOOL_PREFIX = 'X1Tool'
class X1Tool(object):
'appid:6376477c731a89e3280657eb88422645f2d1e2a684541222e21371f3110110d2'
DEFAULT_METADATA = {'name': "X1Tool", 'author': "admin", 'comments': "default", 'template': "default/index.html", 'category': X1Category.DEFAULT}
def __init__(self, metadata=None):
if metadata is None:
metadata = self.DEFAULT_METADATA
self.__metadata = metadata
@classmethod
def appid(cls):
"""per tool GUID"""
try:
return hashlib.sha224(cls.__name__).hexdigest()
except Exception, e:
logging.error('Fail to get appid: %s' % e)
return "0000000000"
def run(self, args):
return args
def metadata(self, attr_key=None, attr_value=None):
try:
if attr_key is None:
return self.__metadata
if attr_value is not None:
self.__metadata[attr_key] = attr_value
return self.__metadata[attr_key]
except Exception, e:
logging.error('Fail to set attr: %s(%s)' % (attr_key, e))
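# NOTE: Illustrative sketch, not part of the original module. A concrete tool
# is expected to subclass X1Tool, supply its own DEFAULT_METADATA and override
# run(); the class name drives appid() via the sha224 hash above. The name
# _ExampleEchoTool and its metadata values are hypothetical.
class _ExampleEchoTool(X1Tool):
    DEFAULT_METADATA = {'name': 'EchoTool', 'author': 'admin',
                        'comments': 'echoes its arguments back',
                        'template': 'default/index.html',
                        'category': X1Category.DEFAULT}
    def run(self, args):
        # Simply echo the arguments back to the caller.
        return args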
if __name__ == '__main__':
app = X1Tool()
print app.appid()
print app.metadata()
| cshzc/X1Tool | src/server/apps/x1tool.py | Python | gpl-2.0 | 1,304 |