text
stringlengths 4
1.02M
| meta
dict |
---|---|
from xd.tool.os import *
from case import *
import unittest
import os
import tempfile
class tests(TestCase):
def setUp(self):
super(tests, self).setUp()
self.cwd = self.restore['cwd']
os.chdir(self.cwd)
def test_pushd(self):
self.assertEqual(os.getcwd(), self.cwd)
with pushd(self.testdir):
self.assertEqual(os.getcwd(), self.testdir)
self.assertEqual(os.getcwd(), self.cwd)
def test_pushd_same(self):
os.chdir(self.testdir)
self.assertEqual(os.getcwd(), self.testdir)
with pushd(self.testdir):
self.assertEqual(os.getcwd(), self.testdir)
self.assertEqual(os.getcwd(), self.testdir)
def test_pushd_nonexistant(self):
self.assertEqual(os.getcwd(), self.cwd)
testdir = '/tmp/THIS_DIRECTORY_SHOULD_NOT_EXIST'
self.assertFalse(os.path.exists(testdir))
with self.assertRaises(OSError):
with pushd(testdir):
pass
self.assertEqual(os.getcwd(), self.cwd)
def test_pushd_cwd_nonexistant(self):
with tempfile.TemporaryDirectory(prefix='unittest-') as cwd:
os.chdir(cwd)
os.rmdir(cwd)
with self.assertRaises(OSError):
os.getcwd()
with self.assertRaises(OSError):
with pushd(self.testdir):
self.fail('this should not be reached')
with self.assertRaises(OSError):
os.getcwd()
os.mkdir(cwd)
def test_pushd_cwd_removed(self):
with tempfile.TemporaryDirectory(prefix='unittest-') as cwd:
os.chdir(cwd)
with self.assertRaises(OSError):
with pushd(self.testdir):
os.rmdir(cwd)
self.assertEqual(os.getcwd(), self.testdir)
os.mkdir(cwd)
| {
"content_hash": "55d7dc854ce4fba8cafe1ea2524fb82d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 68,
"avg_line_length": 32.10344827586207,
"alnum_prop": 0.5848549946294307,
"repo_name": "esben/xd-tool",
"id": "8c0bdb7f41aa95d9508a39c31419d3cc97396cd5",
"size": "1862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/os_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42858"
},
{
"name": "Shell",
"bytes": "105"
}
],
"symlink_target": ""
} |
"""
Mutated Conway rules. One random gene of each cell mutated.
Maze-like structures growing, then devoured by the 'sea',
from where crystalline landscape begin to form.
"""
import numpy as np
import random
DEATH_SPEED = 1
BIRTH_COST = 3
MAX_GENES = 14
FIELD_WIDTH = 1280
FIELD_HEIGHT = 720
SAVE_FRAMES = False
DOWNSCALE_FACTOR = 1
FRAME_SKIP = 1
RANDOM_SEED = None
def fld_init(a):
conway = a.str2genome("3/23")
return np.asarray([[random.choice([0, 1]) * (conway | (1 << random.randint(0, 17))) for j in range(a.height)] for i in range(a.width)]).astype(np.int32)
#return np.asarray([[(random.choice([0, 1]) * conway) if (i > 400 and i < 800 and j > 300 and j < 500) else 0 for j in range(a.height)] for i in range(a.width)]).astype(np.int32)
| {
"content_hash": "21fd0c5051a7519894f40c7f17e40b00",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 182,
"avg_line_length": 30.48,
"alnum_prop": 0.6824146981627297,
"repo_name": "a5kin/evolife",
"id": "974c65393b26963d7ac72a5a9490447c27136c86",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/conway_mutated.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51452"
}
],
"symlink_target": ""
} |
from spindrift.database.dao import DAO
from spindrift.database.field import Field, coerce_int
class Parent(DAO):
TABLENAME = 'parent'
id = Field(coerce_int, is_primary=True)
foo = Field(coerce_int)
bar = Field(coerce_int, is_nullable=True)
def test_save(db):
def on_save(rc, result):
assert rc == 0
assert isinstance(result.id, int)
db.is_done = True
Parent(foo=10).save(on_save, cursor=db.cursor)
db.run()
def test_simultaneous(db):
def on_save(rc, result):
assert rc != 0
assert result == 'query started before last query ended'
db.is_done = True
Parent(foo=10).save(on_save, cursor=db.cursor)
Parent(foo=10).save(on_save, cursor=db.cursor)
def test_missing_null_field(db):
def on_save(rc, result):
assert rc != 0
assert result == "Column 'foo' cannot be null"
db.is_done = True
Parent().save(on_save, cursor=db.cursor)
db.run()
def test_reload(db):
def on_load(rc, result):
assert rc == 0
assert result.foo == 123
db.is_done = True
def on_save(rc, result):
assert rc == 0
Parent.load(on_load, result.id, cursor=db.cursor)
p = Parent(foo=123)
p.save(on_save, cursor=db.cursor)
db.run()
def test_insert(db):
ID = 7
def on_load(rc, result):
assert rc == 0
assert result.foo == 123
db.is_done = True
def on_insert(rc, result):
assert rc == 0
assert result.id == ID
Parent.load(on_load, result.id, cursor=db.cursor)
p = Parent(foo=123)
p.insert(on_insert, id=ID, cursor=db.cursor)
db.run()
def test_delete(db):
ID = 7
def on_load(rc, result):
assert rc == 0
assert result is None
db.is_done = True
def on_delete(rc, result):
assert rc == 0
Parent.load(on_load, ID, cursor=db.cursor)
def on_insert(rc, result):
assert rc == 0
result.delete(on_delete, cursor=db.cursor)
p = Parent(foo=123)
p.insert(on_insert, id=ID, cursor=db.cursor)
db.run()
| {
"content_hash": "57d4e7fb0e53d372503b987138277fdd",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 64,
"avg_line_length": 20.93069306930693,
"alnum_prop": 0.5860927152317881,
"repo_name": "robertchase/spindrift",
"id": "750e980226cd0c6c5815f25e7056eb281af31371",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_db/test_save.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "296019"
},
{
"name": "Shell",
"bytes": "1363"
},
{
"name": "TSQL",
"bytes": "1285"
}
],
"symlink_target": ""
} |
import os
from enum import Enum, auto
from .gn import GnBuilder
class Cyw30739App(Enum):
LIGHT = auto()
LOCK = auto()
OTA_REQUESTOR = auto()
def ExampleName(self):
if self == Cyw30739App.LIGHT:
return "lighting-app"
elif self == Cyw30739App.LOCK:
return "lock-app"
elif self == Cyw30739App.OTA_REQUESTOR:
return "ota-requestor-app"
else:
raise Exception("Unknown app type: %r" % self)
def AppNamePrefix(self):
if self == Cyw30739App.LIGHT:
return "chip-cyw30739-lighting-example"
elif self == Cyw30739App.LOCK:
return "chip-cyw30739-lock-example"
elif self == Cyw30739App.OTA_REQUESTOR:
return "chip-cyw30739-ota-requestor-example"
else:
raise Exception("Unknown app type: %r" % self)
def BuildRoot(self, root):
return os.path.join(root, "examples", self.ExampleName(), "cyw30739")
class Cyw30739Board(Enum):
CYW930739M2EVB_01 = 1
def GnArgName(self):
if self == Cyw30739Board.CYW930739M2EVB_01:
return "CYW930739M2EVB-01"
else:
raise Exception("Unknown board #: %r" % self)
class Cyw30739Builder(GnBuilder):
def __init__(
self,
root,
runner,
app: Cyw30739App = Cyw30739App.LIGHT,
board: Cyw30739Board = Cyw30739Board.CYW930739M2EVB_01,
release: bool = False,
progress_logging: bool = True
):
super(Cyw30739Builder, self).__init__(
root=app.BuildRoot(root), runner=runner)
self.app = app
self.board = board
self.release = release
self.progress_logging = progress_logging
def GnBuildArgs(self):
args = []
if not self.progress_logging:
args.append('chip_progress_logging=false')
if self.release:
args.append('is_debug=false')
return args
def build_outputs(self):
items = {}
for extension in ["elf", "elf.map"]:
name = "%s.%s" % (self.app.AppNamePrefix(), extension)
items[name] = os.path.join(self.output_dir, name)
return items
| {
"content_hash": "cfffe9799f173f7b535f275e0f4e1766",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 27.9873417721519,
"alnum_prop": 0.5843509724106739,
"repo_name": "nestlabs/connectedhomeip",
"id": "5d4470fef1753540877ee6103fef421ca6feb106",
"size": "2796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/build/builders/cyw30739.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2249120"
},
{
"name": "C++",
"bytes": "17279144"
},
{
"name": "CMake",
"bytes": "126266"
},
{
"name": "Dockerfile",
"bytes": "39266"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "154260"
},
{
"name": "JavaScript",
"bytes": "190569"
},
{
"name": "Jinja",
"bytes": "14915"
},
{
"name": "Kotlin",
"bytes": "177091"
},
{
"name": "Makefile",
"bytes": "7729"
},
{
"name": "Objective-C",
"bytes": "738857"
},
{
"name": "Objective-C++",
"bytes": "295149"
},
{
"name": "Python",
"bytes": "1567221"
},
{
"name": "Shell",
"bytes": "163177"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "582004"
}
],
"symlink_target": ""
} |
from trove.common.views import create_links
from trove.common import cfg
CONF = cfg.CONF
class FlavorView(object):
def __init__(self, flavor, req=None):
self.flavor = flavor
self.req = req
def data(self):
# If the flavor id cannot be cast to an int, we simply return
# no id and rely on str_id instead.
try:
f_id = int(self.flavor.id)
except ValueError:
f_id = None
flavor = {
'id': f_id,
'links': self._build_links(),
'name': self.flavor.name,
'ram': self.flavor.ram,
'disk': self.flavor.disk,
'str_id': str(self.flavor.id),
}
if not CONF.trove_volume_support and CONF.device_path is not None:
flavor['local_storage'] = self.flavor.ephemeral
return {"flavor": flavor}
def _build_links(self):
return create_links("flavors", self.req, self.flavor.id)
class FlavorsView(object):
view = FlavorView
def __init__(self, flavors, req=None):
self.flavors = flavors
self.req = req
def data(self):
data = []
for flavor in self.flavors:
#if int(flavor.id) in (2, 6, 8):
if flavor.name.startswith('rds'):
data.append(self.view(flavor, req=self.req).data()['flavor'])
return {"flavors": data}
| {
"content_hash": "e884e075c464ba49cf4aa26899767466",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 26.037735849056602,
"alnum_prop": 0.5572463768115942,
"repo_name": "daizhengy/RDS",
"id": "88ba141608a890082751cd2f0a3c189e71a15e28",
"size": "2022",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "trove/flavor/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2795151"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
} |
import datetime
import uuid
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import timeutils
import routes
import six
from six.moves import range
import webob
import webob.dec
import webob.request
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack.compute import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.netconf
from nova.network import api as network_api
from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_keypair
from nova import utils
from nova import wsgi
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
class Context(object):
pass
class FakeRouter(wsgi.Router):
def __init__(self, ext_mgr=None):
pass
@webob.dec.wsgify
def __call__(self, req):
res = webob.Response()
res.status = '200'
res.headers['X-Test-Success'] = 'True'
return res
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None):
if not inner_app_v2:
inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
if use_no_auth:
api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v2)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v2
mapper['/v1.1'] = api_v2
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None):
if not inner_app_v21:
inner_app_v21 = compute.APIRouterV21(init_only)
if use_no_auth:
api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
limits.RateLimitingMiddleware(inner_app_v21)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v21)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v21
mapper['/v2.1'] = api_v21
return mapper
def stub_out_key_pair_funcs(stubs, have_key_pair=True, **kwargs):
def key_pair(context, user_id):
return [dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)]
def one_key_pair(context, user_id, name):
if name == 'key':
return dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)
else:
raise exc.KeypairNotFound(user_id=user_id, name=name)
def no_key_pair(context, user_id):
return []
if have_key_pair:
stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
stubs.Set(nova.db, 'key_pair_get', one_key_pair)
else:
stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
'__init__', fake_rate_init)
stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
'__call__', fake_wsgi)
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
requested = deltas.pop(resource, 0)
if requested > allowed:
quotas = dict(instances=1, cores=1, ram=1)
quotas[resource] = quota
usages = dict(instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0))
usages[resource]['in_use'] = (quotas[resource] * 0.9 -
allowed)
usages[resource]['reserved'] = quotas[resource] * 0.1
raise exc.OverQuota(overs=[resource], quotas=quotas,
usages=usages)
stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
stubs.Set(netutils, 'get_my_ipv4', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance, name, extra_properties=None):
# emulate glance rejecting image names which are too long
if len(name) > 256:
raise exc.Invalid
return dict(id='123', status='ACTIVE', name=name,
properties=extra_properties)
stubs.Set(compute_api.API, 'snapshot', snapshot)
class stub_out_compute_api_backup(object):
def __init__(self, stubs):
self.stubs = stubs
self.extra_props_last_call = None
stubs.Set(compute_api.API, 'backup', self.backup)
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
self.extra_props_last_call = extra_properties
props = dict(backup_type=backup_type,
rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'
if not publics:
publics = ['1.2.3.4']
class Fake(object):
def __init__(self, skip_policy_check=False):
pass
def get_instance_nw_info(*args, **kwargs):
pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
def validate_networks(self, context, networks, max_count):
return max_count
def create_pci_requests_for_sriov_ports(self, context,
system_metadata,
requested_networks):
pass
if cls is None:
cls = Fake
stubs.Set(network_api, 'API', cls)
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v2'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
out.api_version_request = api_version.APIVersionRequest(version)
return out
class HTTPRequestV3(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v3'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.api_version_request = api_version.APIVersionRequest(version)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
return out
class TestRouter(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class TestRouterV21(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.ResourceV21(controller))
super(TestRouterV21, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
return self.application
def create_info_cache(nw_cache):
if nw_cache is None:
pub0 = ('192.168.1.100',)
pub1 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub1]}]}}]
if not isinstance(nw_cache, six.string_types):
nw_cache = jsonutils.dumps(nw_cache)
return {
"info_cache": {
"network_info": nw_cache,
"deleted": False,
"created_at": None,
"deleted_at": None,
"updated_at": None,
}
}
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
def _return_server(context, uuid, columns_to_join=None, use_slave=False):
return stub_instance(1, **kwargs)
return _return_server
def fake_compute_get(**kwargs):
def _return_server_obj(context, uuid, want_objects=False,
expected_attrs=None):
return stub_instance_obj(context, **kwargs)
return _return_server_obj
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
def _return_servers(context, *args, **kwargs):
servers_list = []
marker = None
limit = None
found_marker = False
if "marker" in kwargs:
marker = kwargs["marker"]
if "limit" in kwargs:
limit = kwargs["limit"]
if 'columns_to_join' in kwargs:
kwargs.pop('columns_to_join')
if 'use_slave' in kwargs:
kwargs.pop('use_slave')
if 'sort_keys' in kwargs:
kwargs.pop('sort_keys')
if 'sort_dirs' in kwargs:
kwargs.pop('sort_dirs')
for i in range(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
if marker is not None and uuid == marker:
found_marker = True
servers_list = []
if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
def fake_compute_get_all(num_servers=5, **kwargs):
def _return_servers_objs(context, search_opts=None, limit=None,
marker=None, want_objects=False,
expected_attrs=None, sort_keys=None,
sort_dirs=None):
db_insts = fake_instance_get_all_by_filters()(None,
limit=limit,
marker=marker)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
return base.obj_make_list(context, objects.InstanceList(),
objects.Instance, db_insts,
expected_attrs=expected)
return _return_servers_objs
def stub_instance(id=1, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
limit=None, marker=None,
launched_at=timeutils.utcnow(),
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
instance_type=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
project_id = 'fake_project'
if metadata:
metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
metadata = []
inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
sys_meta = flavors.save_flavor_info({}, inst_type)
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
if security_groups is None:
security_groups = [{"id": 1, "name": "test", "description": "Foo:",
"project_id": "project", "user_id": "user",
"created_at": None, "updated_at": None,
"deleted_at": None, "deleted": False}]
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
info_cache = create_info_cache(nw_cache)
if instance_type is None:
instance_type = flavors.get_default_flavor()
flavorinfo = jsonutils.dumps({
'cur': instance_type.obj_to_primitive(),
'old': None,
'new': None,
})
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
"deleted": None,
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
"config_drive": config_drive,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"power_state": power_state,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
"hostname": display_name or server_name,
"host": host,
"node": node,
"instance_type_id": 1,
"instance_type": inst_type,
"user_data": user_data,
"reservation_id": reservation_id,
"mac_address": "",
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": "",
"locked": locked_by is not None,
"locked_by": locked_by,
"metadata": metadata,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"shutdown_terminate": True,
"disable_terminate": False,
"security_groups": security_groups,
"root_device_name": root_device_name,
"system_metadata": utils.dict_to_metadata(sys_meta),
"pci_devices": [],
"vm_mode": "",
"default_swap_device": "",
"default_ephemeral_device": "",
"launched_on": "",
"cell_name": "",
"architecture": "",
"os_type": "",
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
},
"cleaned": cleaned}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
return instance
def stub_instance_obj(ctxt, *args, **kwargs):
db_inst = stub_instance(*args, **kwargs)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
inst = objects.Instance._from_db_object(ctxt, objects.Instance(),
db_inst,
expected_attrs=expected)
inst.fault = None
return inst
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_notfound(self, context, volume_id):
raise exc.VolumeNotFound(volume_id=volume_id)
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_check_attach(self, context, *args, **param):
pass
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': timeutils.utcnow(),
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)
def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
return {'snapshot': {'id': 100, 'volumeId': volume_id}}
def stub_snapshot_delete(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
return stub_snapshot(snapshot_id)
def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
def fake_get_available_languages():
existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
return existing_translations
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
| {
"content_hash": "c9a66a62101253e5df636cd8ed5d62ff",
"timestamp": "",
"source": "github",
"line_count": 708,
"max_line_length": 79,
"avg_line_length": 32.76836158192091,
"alnum_prop": 0.5853879310344827,
"repo_name": "devendermishrajio/nova_test_latest",
"id": "a56228cd8d9dc4b7f8f4e97448e1af7f4a5839e7",
"size": "23836",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16277164"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "283675"
}
],
"symlink_target": ""
} |
'''
unit tests for conary facade
'''
from rbuild_test import rbuildhelp
from testutils import mock
import os
from rbuild.facade import conaryfacade
from rbuild import errors
from conary import conarycfg
from conary import conaryclient
from conary import checkin
from conary import errors as conaryerrors
from conary import state
from conary import versions
from conary.cmds import clone
from conary.cmds import updatecmd
from conary.versions import VersionFromString as VFS
from conary.versions import Label
from conary.build import loadrecipe, use, errors as builderrors
from conary.deps import deps
from conary.deps.deps import parseFlavor as Flavor
def mockedMethod(self, real, saveList, fakeReturn, *args, **kwargs):
'''
generic mocked method
@param self: the real object to which the mocked method belongs
@param real: C{None} or function (normally the real method) to call
@param saveList: C{None} or list in which to save arguments
@param fakeReturn: value to return if real is C{None}
@param args: real arguments to method
'''
if saveList is not None:
saveList.append(args)
if real:
return real(self, *args, **kwargs)
else:
return fakeReturn
def mockedFunction(real, saveList, fakeReturn, *args, **kwargs):
'''
generic mocked function
@param real: C{None} or function (normally the real function) to call
@param saveList: C{None} or list in which to save arguments
@param fakeReturn: value to return if real is C{None}
@param args: real arguments to function
'''
if saveList is not None:
saveList.append((args, kwargs))
if real:
return real(*args, **kwargs)
else:
return fakeReturn
class MockConfig(object):
def __init__(self, serverUrl=None):
self.serverUrl = serverUrl
self.includedConfigFile = None
self.repositoryMap = {}
self.user = []
self.name = None
self.contact = None
self.signatureKey = None
self.signatureKeyMap = {}
def includeConfigFile(self, path):
self.includedConfigFile = path
class MockHandle(object):
def __init__(self, serverUrl=None):
self.serverUrl = serverUrl
self._cfg = None
self.ui = mock.MockObject()
def _setServerUrl(self, serverUrl):
self.serverUrl = serverUrl
def getConfig(self):
if self._cfg is None:
self._cfg = MockConfig(serverUrl=self.serverUrl)
return self._cfg
class MockRepositoryClient(object):
def __init__(self):
self.recordFindTroveArgs = []
self.recordCommitChangeSetArgs = []
# this method is sometimes overridden in subtests
#pylint: disable-msg=E0202
def findTroves(self, labelPath, troveSpecs, defaultFlavor=None, allowMissing=False):
results = {}
if labelPath:
if not isinstance(labelPath, (tuple, list)):
labelPath = labelPath,
else:
labelPath = tuple(labelPath)
for troveSpec in troveSpecs:
self.recordFindTroveArgs.append((labelPath, troveSpec))
if troveSpec[2] is not None:
flavor = troveSpec[2]
else:
flavor = deps.parseFlavor('')
if labelPath:
if troveSpec[1]:
verPart = troveSpec[1]
else:
verPart = '1.0-1-1'
troveTup = (troveSpec[0],
versions.VersionFromString('/%s/%s'%(
labelPath[0], verPart)),
flavor)
else:
troveTup = (troveSpec[0],
versions.VersionFromString(troveSpec[1]),
flavor)
results[troveSpec] = [troveTup]
return results
def commitChangeSet(self, cs):
self.recordFindTroveArgs.append(cs)
class ConaryFacadeTest(rbuildhelp.RbuildHelper):
def getMockedHandle(self, serverUrl=None):
return MockHandle(serverUrl=serverUrl)
def getFacade(self, handle):
return conaryfacade.ConaryFacade(handle)
def prep(self):
handle = self.getMockedHandle()
facade = self.getFacade(handle)
return handle, facade
def prepReposState(self, facade):
mock.mockMethod(facade._getRepositoryStateFromDirectory)
repos = mock.MockObject()
sourceState = mock.MockObject()
facade._getRepositoryStateFromDirectory._mock.setDefaultReturn(
[repos, sourceState])
return (repos, sourceState)
def testParseRBuilderConfigFile(self):
handle, facade = self.prep()
cfg = MockConfig()
facade._parseRBuilderConfigFile(cfg)
assert cfg.includedConfigFile is None
handle._setServerUrl('http://conary.example.com')
handle._cfg = None # cached config is now wrong, must regenerate
facade._parseRBuilderConfigFile(cfg)
assert cfg.includedConfigFile == 'http://conary.example.com/conaryrc'
def xtestParseRBuilderConfigFile(self):
handle, facade = self.prep()
cfg = MockConfig()
facade._parseRBuilderConfigFile(cfg)
assert cfg.includedConfigFile is None
handle._setServerUrl('http://conary.example.com')
handle._cfg = None # cached config is now wrong, must regenerate
facade._parseRBuilderConfigFile(cfg)
assert cfg.includedConfigFile == 'http://conary.example.com/conaryrc'
def testGetConaryClient(self):
_, facade = self.prep()
mock.mock(facade, 'getConaryConfig')
facade.getConaryConfig._mock.setDefaultReturn('c')
savedArgs = []
self.mock(conaryclient, 'ConaryClient',
lambda *args: mockedFunction(None, savedArgs, None, *args))
facade._getConaryClient()
self.assertEquals(savedArgs, [(('c',), {})])
def testGetRepositoryClient(self):
_, facade = self.prep()
mock.mock(facade, '_getConaryClient')
conaryClient = mock.MockObject()
conaryClient.getRepos._mock.setDefaultReturn('r')
facade._getConaryClient._mock.setDefaultReturn(conaryClient)
assert facade._getRepositoryClient() == 'r', \
'Failed to find return from getRepos'
def testGetVersion(self):
_, facade = self.prep()
versionString = '/a@b:c/1.2-3-4'
versionObject = versions.VersionFromString(versionString)
assert facade._getVersion(versionString) == versionObject
assert facade._getVersion(versionObject) == versionObject
def testGetLabel(self):
_, facade = self.prep()
labelString = 'a@b:c'
labelObject = versions.Label(labelString)
assert facade._getLabel(labelString) == labelObject
assert facade._getLabel(labelObject) == labelObject
v1 = '/a@b:c'
v2 = '/b@c:d/%s' % v1
v3 = '%s/1.2.3-1-1' % v1
assert facade._getLabel(v1) == labelObject
assert facade._getLabel(v2) == labelObject
assert facade._getLabel(v3) == labelObject
def testIsValidLabel(self):
_, facade = self.prep()
assert facade.isValidLabel('a@b:c')
assert (facade.isValidLabel('a') == False)
assert (facade.isValidLabel('a@b') == False)
assert (facade.isValidLabel('a@:c') == False)
assert (facade.isValidLabel('b:c') == False)
assert (facade.isValidLabel('a@b:c/1') == False)
assert (facade.isValidLabel('/a@b:c') == False)
def testGetFlavor(self):
_, facade = self.prep()
flavorString = '!bootstrap is:x86'
flavorObject = deps.parseFlavor(flavorString)
assert facade._getFlavor(flavorString) == flavorObject
assert facade._getFlavor(flavorObject) == flavorObject
flavorObject = deps.parseFlavor('') # same as deps.Flavor()
assert facade._getFlavor() == flavorObject
assert facade._getFlavor(None, keepNone=True) == None
def testFindTrove(self):
_, facade = self.prep()
r = MockRepositoryClient()
self.mock(conaryfacade.ConaryFacade, '_getRepositoryClient',
lambda *args: mockedMethod(args[0], None, None, r, *args[1:]))
# pointless to mock _getVersion and _getFlavor
versionString = '/a@b:c/1.2.3-1'
returnedTroveTup = facade._findTrove('foo:source', versionString)
assert len(r.recordFindTroveArgs) == 1
labelPath, troveTup = r.recordFindTroveArgs[0]
name, versionObject, flavorObject = returnedTroveTup
assert troveTup[1] == str(returnedTroveTup[1])
assert labelPath is None
assert name == 'foo:source'
assert versionObject == versions.VersionFromString(versionString)
assert flavorObject == deps.Flavor()
r.recordFindTroveArgs = []
returnedTroveTup = facade._findTrove('foo', '1.2.3-1-1',
labelPath='a@b:c',
flavor='bootstrap')
assert len(r.recordFindTroveArgs) == 1
labelPath, troveTup = r.recordFindTroveArgs[0]
name, versionObject, flavorObject = returnedTroveTup
# transformed due to labelPath:
assert troveTup[1] != str(returnedTroveTup[1])
assert labelPath == ('a@b:c',)
assert name == 'foo'
assert versionObject == versions.VersionFromString('/a@b:c/1.2.3-1-1')
assert flavorObject == deps.parseFlavor('bootstrap')
r.findTroves = lambda *args, **kw: {}
returnedTroveTup = facade._findTrove('foo', '1.2.3-1-1',
labelPath='a@b:c',
flavor='bootstrap')
assert(returnedTroveTup is None)
def findTroves(*args, **kw):
raise conaryerrors.LabelPathNeeded
r.findTroves = findTroves
self.assertRaises(errors.RbuildError, facade._findTrove,
'foo', '1.2.3-1-1')
def testFindTroves(self):
_, facade = self.prep()
repos = MockRepositoryClient()
mock.mockMethod(facade._getRepositoryClient, repos)
results = facade._findTroves([('foo', None, None)],
['localhost@rpl:1', 'localhost@rpl:2'])
assert(results == {('foo', None, None):
[('foo', VFS('/localhost@rpl:1/1.0-1-1'), Flavor(''))]})
results = facade._findTrovesFlattened([('foo', None, None)],
['localhost@rpl:1', 'localhost@rpl:2'])
assert(results == [('foo',
VFS('/localhost@rpl:1/1.0-1-1'), Flavor(''))])
results = facade._findTroves(['foo[ssl]'], 'localhost@rpl:1')
assert(results == {('foo[ssl]'):
[('foo', VFS('/localhost@rpl:1/1.0-1-1'), Flavor('ssl'))]})
results = facade._findTrovesFlattened(['foo[ssl]'], 'localhost@rpl:1')
assert(results == [('foo',
VFS('/localhost@rpl:1/1.0-1-1'), Flavor('ssl'))])
def testVersionToString(self):
_, facade = self.prep()
versionString = '/a@b:c/1.2-3-4'
versionObject = versions.VersionFromString(versionString)
assert facade._versionToString(versionString) == versionString
assert facade._versionToString(versionObject) == versionString
def testFlavorToString(self):
_, facade = self.prep()
flavorString = '!bootstrap is:x86'
flavorObject = deps.parseFlavor(flavorString)
flavorString = str(flavorObject) # get canonical representation
assert facade._flavorToString(flavorObject) == flavorString
assert facade._flavorToString(flavorString) == flavorString
assert facade._flavorToString(None) == ''
def testTroveTupToStrings(self):
_, facade = self.prep()
name = 'foo'
versionString = '/a@b:c/1.2-3-4'
flavorString = '!bootstrap is:x86'
flavorObject = deps.parseFlavor(flavorString)
flavorString = str(flavorObject) # get canonical representation
returnTroveTup = facade._troveTupToStrings(name, versionString,
flavorString)
returnName, returnVersionString, returnFlavorString = returnTroveTup
assert returnName == name
assert returnVersionString == versionString
assert returnFlavorString == flavorString
def testGetConaryConfig(self):
handle, facade = self.prep()
mockConaryConfig = MockConfig()
handle.getConfig() # create handle._cfg
handle._cfg.applianceTemplate = 'applianceTemplate'
handle._cfg.factoryTemplate = 'factoryTemplate'
handle._cfg.groupTemplate = 'groupTemplate'
handle._cfg.recipeTemplate = 'recipeTemplate'
handle._cfg.recipeTemplateDirs = ['recipeTemplateDir', 'recipeTemplateDir2']
handle._cfg.repositoryMap = {'foo': 'bar'}
handle._cfg.user = ('rbuildCfgUser', 'rbuildCfgPassword')
handle._cfg.name = 'rbuildCfgName'
handle._cfg.contact = 'rbuildCfgContact'
handle._cfg.signatureKey = 'ASDF'
handle._cfg.signatureKeyMap = {'foo': 'FDSA'}
handle._cfg.repositoryUser = [('foo', 'foouser', 'foopassword')]
self.mock(conarycfg, 'ConaryConfiguration',
lambda *args: mockedFunction(None, None, mockConaryConfig, *args))
facadeCfg = facade.getConaryConfig()
self.assertEquals(facadeCfg.repositoryMap, handle._cfg.repositoryMap)
self.assertEquals(facadeCfg.user, [
('foo', 'foouser', 'foopassword'),
('*', 'rbuildCfgUser', 'rbuildCfgPassword')])
self.assertEquals(facadeCfg.name, handle._cfg.name)
self.assertEquals(facadeCfg.contact, handle._cfg.contact)
self.assertEquals(facadeCfg.signatureKey, 'ASDF')
self.assertEquals(facadeCfg.signatureKeyMap, {'foo': 'FDSA'})
facadeCfgCached = facade.getConaryConfig()
self.assertEquals(facadeCfg, facadeCfgCached)
def test_getBaseConaryConfig(self):
_, facade = self.prep()
def readFiles(s):
s.read(self.workDir + '/conaryrc')
self.mock(conarycfg.ConaryConfiguration, 'readFiles', readFiles)
name = 'John Doe'
contact = 'http://john.doe/'
open(self.workDir + '/conaryrc', 'w').write(
'name %s\ncontact %s\n' %(name, contact))
ccfg = facade._getBaseConaryConfig()
self.assertEquals(ccfg.name, name)
self.assertEquals(ccfg.contact, contact)
def testSetFactoryFlag(self):
_, facade = self.prep()
self.mock(checkin, 'factory', mock.MockObject())
facade.setFactoryFlag(None)
checkin.factory._mock.assertCalled('', targetDir=None)
facade.setFactoryFlag(None, 'a')
checkin.factory._mock.assertCalled('', targetDir='a')
def testCommit(self):
_, facade = self.prep()
mockConaryCfg = mock.MockObject()
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn(mockConaryCfg)
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
savedArgs = []
self.mock(checkin, 'commit',
lambda *args, **kwargs: mockedFunction(None, savedArgs, None, *args, **kwargs))
targetDir = os.path.join(self.workDir, "target")
os.makedirs(targetDir)
facade.commit(targetDir=targetDir, message="message 1")
expectedArgs = [(('r', mockConaryCfg),
dict(message="message 1"))]
self.assertEquals(savedArgs, expectedArgs)
def testCheckout(self):
_, facade = self.prep()
mockConaryCfg = mock.MockObject()
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn(mockConaryCfg)
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
savedArgs = []
self.mock(checkin, 'checkout',
lambda *args: mockedFunction(None, savedArgs, None, *args))
facade.checkout('packageName', 'labelName', targetDir='targetDirName')
expectedArgs = [(('r', mockConaryCfg, 'targetDirName',
['packageName=labelName']), {})]
self.assertEquals(savedArgs, expectedArgs)
def testDetachPackage(self):
_, facade = self.prep()
mockConaryCfg = mock.MockObject()
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn(mockConaryCfg)
mock.mock(clone, 'CloneTrove')
troveSpec = self.makeTroveTuple('foo:source')
facade.detachPackage(troveSpec, '/targetlabel.rpath.org@rpath:1')
clone.CloneTrove._mock.assertCalled(mockConaryCfg,
'/targetlabel.rpath.org@rpath:1',
[troveSpec[0]+'='+troveSpec[1].asString()],
message='Automatic promote by rBuild.')
facade.detachPackage(troveSpec, '/targetlabel.rpath.org@rpath:1', 'blech')
clone.CloneTrove._mock.assertCalled(mockConaryCfg,
'/targetlabel.rpath.org@rpath:1',
[troveSpec[0]+'='+troveSpec[1].asString()],
message='blech')
def testRefresh(self):
handle, facade = self.prep()
mockConaryCfg = mock.MockObject()
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn(mockConaryCfg)
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
mock.mockMethod(facade._initializeFlavors)
mock.mockFunction(use.setBuildFlagsFromFlavor)
savedArgs = []
self.mock(checkin, 'refresh',
lambda *args, **kw: mockedFunction(None, savedArgs, None, *args))
facade.refresh()
expectedArgs = [(('r', mockConaryCfg), {})]
self.assertEquals(savedArgs, expectedArgs)
facade._initializeFlavors._mock.assertCalled()
use.setBuildFlagsFromFlavor._mock.assertCalled(None,
mockConaryCfg.buildFlavor, False)
def testUpdateCheckout(self):
_, facade = self.prep()
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
savedArgs = []
self.mock(checkin, 'nologUpdateSrc',
lambda *args: mockedFunction(None, savedArgs, True, *args))
facade.updateCheckout('targetDirName')
self.assertEquals(savedArgs, [
(('r', [os.sep.join((os.getcwd(), 'targetDirName'))]), {})])
# Up to date condition
def Up2Date(*args):
raise builderrors.UpToDate('testDirName')
self.mock(checkin, 'nologUpdateSrc', Up2Date)
self.assertEquals(True, facade.updateCheckout('targetDirName'))
# note yet checked in
def Up2Date(*args):
raise builderrors.NotCheckedInError('testDirName')
self.mock(checkin, 'nologUpdateSrc', Up2Date)
self.assertEquals(True, facade.updateCheckout('targetDirName'))
#Failure conditions
savedArgs = []
def attrErrorRaise(*args):
raise AttributeError('backwards compatibility test')
self.mock(checkin, 'nologUpdateSrc', attrErrorRaise)
self.mock(checkin, 'updateSrc',
lambda *args: mockedFunction(None, savedArgs, None, *args))
self.assertEquals(None, facade.updateCheckout('targetDirName'))
self.assertEquals(savedArgs, [
(('r', [os.sep.join((os.getcwd(), 'targetDirName'))]), {})])
def CheckinErrorRaise(*args):
raise builderrors.CheckinError()
self.mock(checkin, 'nologUpdateSrc', CheckinErrorRaise)
error = self.assertRaises(errors.RbuildError, facade.updateCheckout,
'targetDirName')
self.assertEquals(builderrors.CheckinError.__doc__, error.msg)
def testGetCheckoutStatus(self):
_, facade = self.prep()
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
mockedGenerateStatus = mock.MockObject()
mockedGenerateStatus._mock.setDefaultReturn(['asdf'])
self.mock(checkin, 'generateStatus', mockedGenerateStatus)
ret = facade.getCheckoutStatus('targetDirName')
mockedGenerateStatus._mock.assertCalled('r', dirName='targetDirName')
assert ret == ['asdf']
def testGetCheckoutLog(self):
_, facade = self.prep()
repos, sourceState = self.prepReposState(facade)
sourceState.getName._mock.setDefaultReturn('name')
mock.mockMethod(facade._getRepositoryVersions)
mock.mockMethod(facade._getNewerRepositoryVersions)
facade._getRepositoryVersions._mock.setDefaultReturn(['1'])
facade._getNewerRepositoryVersions._mock.setDefaultReturn(['broken'])
flavor = mock.MockObject()
flavor._mock.setDefaultReturn(flavor)
self.mock(deps, 'Flavor', flavor)
trove = mock.MockObject()
repos.getTroves._mock.setReturn(trove, [('name', '1', flavor)])
mockedIterLogMessages = mock.MockObject()
mockedIterLogMessages._mock.setDefaultReturn(['asdf'])
self.mock(checkin, 'iterLogMessages', mockedIterLogMessages)
ret = facade.getCheckoutLog('targetDirName')
mockedIterLogMessages._mock.assertCalled(trove)
self.assertEquals(ret, ['asdf'])
facade._getNewerRepositoryVersions._mock.setDefaultReturn(['1'])
facade._getRepositoryVersions._mock.setDefaultReturn(['broken'])
ret = facade.getCheckoutLog('dir2', newerOnly=True)
mockedIterLogMessages._mock.assertCalled(trove)
self.assertEquals(ret, ['asdf'])
mock.mock(facade, '_getVersion')
facade._getVersion._mock.setReturn('1', 'string')
ret = facade.getCheckoutLog('dir3', versionList=['string'])
mockedIterLogMessages._mock.assertCalled(trove)
self.assertEquals(ret, ['asdf'])
def testIterRepositoryDiff(self):
_, facade = self.prep()
repos, sourceState = self.prepReposState(facade)
ver = mock.MockObject()
lastver = mock.MockObject()
sourceState.getName._mock.setDefaultReturn('name')
sourceState.getVersion._mock.setDefaultReturn(ver)
mock.mockMethod(facade._getNewerRepositoryVersions)
facade._getNewerRepositoryVersions._mock.setDefaultReturn(['broken'])
mock.mock(facade, '_getVersion')
facade._getVersion._mock.setReturn(lastver, lastver)
mockedGetIterRdiff = mock.MockObject()
mockedGetIterRdiff._mock.setDefaultReturn(['asdf'])
self.mock(checkin, '_getIterRdiff', mockedGetIterRdiff)
output = [x for x in facade.iterRepositoryDiff('targetDirName',
lastver=lastver)]
mockedGetIterRdiff._mock.assertCalled(repos, ver.branch().label(),
'name', ver.asString(), lastver.asString())
self.assertEquals(output, ['asdf'])
facade._getNewerRepositoryVersions._mock.setDefaultReturn(
[None, None, None, lastver])
output = [x for x in facade.iterRepositoryDiff('targetDirName')]
mockedGetIterRdiff._mock.assertCalled(repos, ver.branch().label(),
'name', ver.asString(), lastver.asString())
self.assertEquals(output, ['asdf'])
def testIterCheckoutDiff(self):
_, facade = self.prep()
repos, sourceState = self.prepReposState(facade)
mockedIterDiff = mock.MockObject()
self.mock(checkin, '_getIterDiff', mockedIterDiff)
sourceState.getVersion().asString._mock.setDefaultReturn('1')
mockedIterDiff._mock.setReturn(['asdf'], repos, '1',
pathList=None, logErrors=False, dirName='.')
output = [x for x in facade.iterCheckoutDiff('.')]
self.assertEquals(output, ['asdf'])
sourceState.getVersion().asString._mock.setDefaultReturn('2')
mockedIterDiff._mock.setReturn(0, repos, '2',
pathList=None, logErrors=False, dirName='.')
output = [x for x in facade.iterCheckoutDiff('.')]
self.assertEquals(output, [])
sourceState.getVersion().asString._mock.setDefaultReturn('3')
mockedIterDiff._mock.setReturn(2, repos, '3',
pathList=None, logErrors=False, dirName='.')
output = [x for x in facade.iterCheckoutDiff('.')]
self.assertEquals(output, [])
def testGetNewerRepositoryVersionStrings(self):
_, facade = self.prep()
mock.mockMethod(facade._getNewerRepositoryVersions)
facade._getNewerRepositoryVersions._mock.setReturn([1], '.')
mock.mock(facade, '_versionToString')
facade._versionToString._mock.setReturn('1', 1)
output = [x for x in facade._getNewerRepositoryVersionStrings('.')]
self.assertEquals(output, ['1'])
def testGetNewerRepositoryVersions(self):
_, facade = self.prep()
repos, sourceState = self.prepReposState(facade)
sourceState.getVersion._mock.setDefaultReturn(1)
ver0 = mock.MockObject()
ver1 = mock.MockObject()
ver2 = mock.MockObject()
ver0.isAfter._mock.setReturn(False, 1)
ver1.isAfter._mock.setReturn(False, 1)
ver2.isAfter._mock.setReturn(True, 1)
mock.mockMethod(facade._getRepositoryVersions)
facade._getRepositoryVersions._mock.setDefaultReturn([ver0,ver1,ver2])
output = facade._getNewerRepositoryVersions('.')
self.assertEquals(output, [ver2])
def testGetRepositoryVersions(self):
_, facade = self.prep()
repos, sourceState = self.prepReposState(facade)
sourceState.getBranch._mock.setDefaultReturn('c.r.c@r:2')
sourceState.getName._mock.setDefaultReturn('asdf')
repos.getTroveVersionsByBranch._mock.setReturn(
{'asdf': {1:2, 3:4}}, {'asdf': {'c.r.c@r:2': None}})
output = facade._getRepositoryVersions('.')
self.assertEquals(output, [3,1])
repos.getTroveVersionsByBranch._mock.setReturn(
None, {'asdf': {'c.r.c@r:2': None}})
output = facade._getRepositoryVersions('.')
self.assertEquals(output, [])
def testGetRepositoryStateFromDirectory(self):
_, facade = self.prep()
repos = mock.MockObject()
mock.mockMethod(facade._getRepositoryClient, repos)
mock.mock(state, 'ConaryStateFromFile')
conaryState = mock.MockObject(stableReturnValues=True)
state.ConaryStateFromFile._mock.setDefaultReturn(conaryState)
sourceState = conaryState.getSourceState()
output = facade._getRepositoryStateFromDirectory('.')
self.assertEquals(output, (repos, sourceState))
def testIsConaryCheckoutDirectory(self):
_, facade = self.prep()
self.mock(os.path, 'exists', lambda *args: True)
output = facade.isConaryCheckoutDirectory('.')
self.unmock()
self.assertEquals(output, True)
self.mock(os.path, 'exists', lambda *args: False)
output = facade.isConaryCheckoutDirectory('.')
self.unmock()
self.assertEquals(output, False)
def testCreateNewPackage(self):
_, facade = self.prep()
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn('c')
newTrove = mock.MockObject()
self.mock(checkin, 'newTrove', newTrove)
facade.createNewPackage('packageName', 'labelName')
newTrove._mock.assertCalled('r', 'c', 'packageName=labelName',
dir=None, template=None, factory=None)
def testCreateNewPackageTemplate(self):
_, facade = self.prep()
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn('c')
newTrove = mock.MockObject()
self.mock(checkin, 'newTrove', newTrove)
facade.createNewPackage('packageName', 'labelName',
template='default')
newTrove._mock.assertCalled('r', 'c', 'packageName=labelName',
dir=None, template='default', factory=None)
def testCreateNewPackageFactory(self):
_, facade = self.prep()
mock.mockMethod(facade._getRepositoryClient)
facade._getRepositoryClient._mock.setDefaultReturn('r')
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn('c')
newTrove = mock.MockObject()
self.mock(checkin, 'newTrove', newTrove)
facade.createNewPackage('packageName', 'labelName',
factory='thefact')
newTrove._mock.assertCalled('r', 'c', 'packageName=labelName',
dir=None, template=None, factory='thefact')
def testShadowSource(self):
_, facade = self.prep()
troveTup = ('name', 'version', 'targetLabel')
mock.mock(facade, '_getVersion')
mock.mock(facade, '_getFlavor')
mock.mock(facade, '_getLabel')
mock.mockMethod(facade._getConaryClient)
facade._getConaryClient._mock.setDefaultReturn(mock.MockObject())
# First, test the error-return case
client = facade._getConaryClient()
client.createShadowChangeSet._mock.setDefaultReturn(None)
assert facade.shadowSource(*troveTup) == False
facade._getVersion._mock.assertCalled('version')
facade._getFlavor._mock.assertCalled()
facade._getLabel._mock.assertCalled('targetLabel')
# now test the existing-shadow case
facade._getConaryClient().createShadowChangeSet._mock.setDefaultReturn(
([troveTup], None))
assert facade.shadowSource(*troveTup) == troveTup
# finally, test the actually-created-a-shadow case
cs = mock.MockObject(stableReturnValues=True)
cs.isEmpty._mock.setDefaultReturn(False)
trvCs = cs.iterNewTroveList()[0]
trvCs.getNewNameVersionFlavor._mock.setDefaultReturn(troveTup)
facade._getConaryClient().createShadowChangeSet._mock.setDefaultReturn(
(None, cs))
assert(facade.shadowSource('name', 'version', 'targetLabel')
== troveTup)
def testShadowSourceForBinary(self):
_, facade = self.prep()
name, version, flavor = self.makeTroveTuple('foo[ssl]')
targetLabel = 'localhost@rpl:1'
version, flavor = str(version), str(flavor)
client = mock.MockObject()
mock.mockMethod(facade._getConaryClient, client)
existingTroves = [self.makeTroveTuple('foo:source')]
cs = mock.MockObject(iterNewTroveList=lambda: [])
client.createShadowChangeSet._mock.setDefaultReturn((existingTroves,
cs))
results = facade.shadowSourceForBinary(name, version, flavor,
targetLabel)
trv = existingTroves[0]
assert(results == (trv[0], str(trv[1]), str(trv[2])))
facade._getConaryClient().createShadowChangeSet._mock.setDefaultReturn(
None)
results = facade.shadowSourceForBinary(name, version, flavor,
targetLabel)
assert(results == False)
def testCheckoutBinaryPackage(self):
_, facade = self.prep()
mock.mock(facade, '_getVersion')
mock.mock(facade, '_getFlavor')
mockConaryCfg = mock.MockObject()
mockConaryCfg._mock.enable('root')
mock.mockMethod(facade.getConaryConfig)
facade.getConaryConfig._mock.setDefaultReturn(mockConaryCfg)
# Pin callback object
callback = conaryfacade._QuietUpdateCallback()
self.mock(conaryfacade, '_QuietUpdateCallback', lambda: callback)
# quiet
savedArgs = []
doUpdateFn = lambda *args, **kwargs: mockedFunction(None, savedArgs,
None, *args, **kwargs)
self.mock(updatecmd, 'doUpdate', doUpdateFn)
facade.checkoutBinaryPackage('packageName', 'packageVersion',
'packageFlavor', 'targetDir')
self.assertEquals(mockConaryCfg.root, 'targetDir')
self.assertEquals(savedArgs, [
((mockConaryCfg, 'packageName=packageVersion[packageFlavor]'),
{'tagScript': None, 'callback': callback, 'depCheck': False})
])
# noisy
savedArgs = []
facade.checkoutBinaryPackage('packageName', 'packageVersion',
'packageFlavor', 'targetDir', quiet=False)
self.assertEquals(savedArgs, [
((mockConaryCfg, 'packageName=packageVersion[packageFlavor]'),
{'tagScript': None, 'callback': None, 'depCheck': False})
])
def testFindPackageInSearchPaths(self):
_, facade = self.prep()
repos = mock.MockObject()
mock.mockMethod(facade._getRepositoryClient, repos)
groupSpecFoo = ('group-foo', 'localhost@rpl:1', deps.parseFlavor(''))
groupSpecBar = ('group-bar', 'localhost@rpl:1', deps.parseFlavor(''))
groupTup = self.makeTroveTuple('group-foo=localhost@rpl:1/1:1.0-1-1')
groupTup2 = self.makeTroveTuple(
'group-foo=localhost@rpl:1/2:2.0-2-1[is:x86]')
groupTupBar = self.makeTroveTuple(
'group-bar=localhost@rpl:1/3:1.0-1-1')
groupTrv = mock.MockObject(stableReturnValues=True)
repos.findTroves._mock.setReturn({groupSpecFoo : [groupTup, groupTup2],
groupSpecBar : [groupTupBar]},
None, [groupSpecFoo],
allowMissing = True)
repos.getTroves._mock.setReturn([groupTrv],
[groupTup2], withFiles=False)
iterator = mock.MockObject()
fooTup = self.makeTroveTuple('foo')
iterator._mock._dict[0] = fooTup
iterator._mock._dict[1] = self.makeTroveTuple('blah')
groupTrv.iterTroveList._mock.setDefaultReturn(iterator)
self.assertEquals(facade._findPackageInSearchPaths([groupSpecFoo], 'foo'),
[fooTup])
# when no groups are found, return nothing
repos.findTroves._mock.setReturn({}, None, [groupSpecFoo],
allowMissing = True)
self.assertEquals(facade._findPackageInSearchPaths([groupSpecFoo], 'foo'),
[])
def testFindPackageInSearchPathsWithLabels(self):
_, facade = self.prep()
repos = mock.MockObject()
mock.mockMethod(facade._getRepositoryClient, repos)
groupSpecFoo = ('group-foo', 'localhost@rpl:1', deps.parseFlavor(''))
labelSpecFoo = (None, 'localhost@rpl:2', deps.parseFlavor(''))
troveSpecFoo = ('foo', ) + labelSpecFoo[1:]
groupTup = self.makeTroveTuple('group-foo=localhost@rpl:1/1:1.0-1-1')
groupTup2 = self.makeTroveTuple(
'group-foo=localhost@rpl:1/2:2.0-2-1[is:x86]')
fooTroveTups = [ self.makeTroveTuple(x)
for x in ['foo=localhost@rpl:2/1:1-1-1[is:x86_64]',
'foo=localhost@rpl:2/2:2-1-1[is:x86]']
]
groupTrv = mock.MockObject(stableReturnValues=True)
groupBarTrv = mock.MockObject(stableReturnValues=True)
repos.findTroves._mock.setReturn({groupSpecFoo : [groupTup, groupTup2],
troveSpecFoo : fooTroveTups },
None, [groupSpecFoo, troveSpecFoo],
allowMissing = True)
repos.getTroves._mock.setReturn([groupTrv],
[groupTup2], withFiles=False)
iterator = mock.MockObject()
fooTup = self.makeTroveTuple('foo')
iterator._mock._dict[0] = fooTup
iterator._mock._dict[1] = self.makeTroveTuple('blah')
groupTrv.iterTroveList._mock.setDefaultReturn(iterator)
self.assertEquals(facade._findPackageInSearchPaths(
[groupSpecFoo, labelSpecFoo], 'foo'), [fooTup, fooTroveTups[1]])
# when no groups are found, return just the data in the label search
# path
repos.findTroves._mock.setReturn({troveSpecFoo : fooTroveTups},
None, [groupSpecFoo, troveSpecFoo], allowMissing = True)
self.assertEquals(facade._findPackageInSearchPaths(
[groupSpecFoo, labelSpecFoo], 'foo'), [fooTroveTups[1]])
def test_overrideFlavors(self):
_, facade = self.prep()
self.assertEquals(facade._overrideFlavors('!foo is:x86',
['foo', 'bar']),
['foo is: x86', 'bar,!foo is: x86'])
def testLoadRecipeClassFromCheckout(self):
_, facade = self.prep()
repos = mock.MockObject()
mock.mockMethod(facade._getRepositoryClient, repos)
mock.mockMethod(facade.getConaryConfig)
mock.mock(state, 'ConaryStateFromFile')
mock.mock(loadrecipe, 'RecipeLoader')
loader = mock.MockObject()
loadrecipe.RecipeLoader._mock.setDefaultReturn(loader)
loader.getRecipe._mock.setDefaultReturn('recipe')
result = facade._loadRecipeClassFromCheckout(
self.workDir + '/foo.recipe')
self.assertEquals(result, 'recipe')
def testRemoveNonRecipeFilesFromCheckout(self):
handle, facade = self.prep()
repos = mock.MockObject()
mock.mockMethod(facade._getRepositoryClient, repos)
mock.mock(state, 'ConaryStateFromFile')
conaryState = mock.MockObject(stableReturnValues=True)
state.ConaryStateFromFile._mock.setDefaultReturn(conaryState)
sourceState = conaryState.getSourceState()
iterator = sourceState.iterFileList()
iterator._mock.setList([('pathId', 'foo.recipe', 'fileId', 'version'),
('pathId2', 'bam', 'fileId', 'version'),
('pathId3', 'directory', 'fileId', 'version')])
os.mkdir(self.workDir + '/foo')
os.mkdir(self.workDir + '/foo/directory')
self.writeFile(self.workDir + '/foo/foo.recipe', 'recipe')
self.writeFile(self.workDir + '/foo/bam', 'otherfile')
facade._removeNonRecipeFilesFromCheckout(
self.workDir + '/foo/foo.recipe')
conaryState.write._mock.assertCalled(self.workDir + '/foo/CONARY')
sourceState.removeFile._mock.assertCalled('pathId2')
sourceState.removeFile._mock.assertCalled('pathId3')
assert(sorted(os.listdir(self.workDir + '/foo')) == ['foo.recipe'])
# one more time, this time raising error on attempt to unlink
mock.mock(os, 'unlink')
os.unlink._mock.raiseErrorOnAccess(OSError('foo', 'bar'))
self.writeFile(self.workDir + '/foo/bam', 'otherfile')
facade._removeNonRecipeFilesFromCheckout(
self.workDir + '/foo/foo.recipe')
handle.ui.warning._mock.assertCalled(
'cannot remove %s: %s', '%s/foo/bam' % self.workDir, 'bar')
def testGetFlavorArch(self):
_, facade = self.prep()
assert(facade._getFlavorArch('foo,bar is:x86(~i686)') == 'x86')
assert(facade._getFlavorArch('foo,bar is:x86(~i686) x86_64') \
== 'x86_64')
assert(facade._getFlavorArch('foo,bar') == None)
def testGetShortFlavorDescriptors(self):
_, facade = self.prep()
results = facade._getShortFlavorDescriptors(['foo is:x86',
'bar is:x86'])
assert (results == {'foo is: x86' : 'x86-foo',
'bar is: x86' : 'x86-bar'})
# test short-circuit case
results = facade._getShortFlavorDescriptors([])
self.failUnlessEqual(results, {})
def testGetNameForCheckout(self):
_, facade = self.prep()
repos = mock.MockObject()
mock.mockMethod(facade._getRepositoryClient, repos)
mock.mock(state, 'ConaryStateFromFile')
conaryState = mock.MockObject(stableReturnValues=True)
state.ConaryStateFromFile._mock.setDefaultReturn(conaryState)
sourceState = conaryState.getSourceState()
sourceState.getName._mock.setReturn('foo:source')
assert(facade.getNameForCheckout('bam') == 'foo')
def testIsGroupName(self):
_, facade = self.prep()
assert(facade.isGroupName('group-foo'))
assert(not facade.isGroupName('group-bar:debuginfo'))
def testPromoteGroups(self):
_, facade = self.prep()
client = mock.MockObject()
mock.mockMethod(facade._getConaryClient, client)
success = True
cs = mock.MockObject()
groupList = [('group-dist', '/localhost@rpl:devel/1.0-1-1', '')],
trv = mock.MockObject()
trv.getNewNameVersionFlavor._mock.setReturn(
('group-dist', VFS('/localhost@rpl:qa/1.0-1-1'), ''))
cs.iterNewTroveList()._mock.setList([trv])
client.createSiblingCloneChangeSet._mock.setReturn((success, cs),
{Label('localhost@rpl:devel'): Label('localhost@rpl:qa'),
Label('other@somewhere:else'): Label('localhost@rpl:qa'),
Label('yetanother@somewhere:else'): VFS('/localhost@rpl:qa')},
groupList, cloneSources=True)
mock.mockMethod(facade._getRepositoryClient)
repos = facade._getRepositoryClient()
rc = facade.promoteGroups(groupList,
{'localhost@rpl:devel': 'localhost@rpl:qa',
'other@somewhere:else': facade._getLabel('localhost@rpl:qa'),
'yetanother@somewhere:else': '/localhost@rpl:qa'}) # RBLD-91
assert(rc == [('group-dist', '/localhost@rpl:qa/1.0-1-1', '')])
repos.commitChangeSet._mock.assertCalled(cs)
# failureCase
success = False
client.createSiblingCloneChangeSet._mock.setReturn((success, None),
{Label('localhost@rpl:devel'): Label('localhost@rpl:qa')},
groupList, cloneSources=True)
err = self.assertRaises(errors.RbuildError,
facade.promoteGroups, groupList,
{'localhost@rpl:devel': 'localhost@rpl:qa'})
assert(str(err) == 'Promote failed.')
def testLatestPackages(self):
_, facade = self.prep()
client = mock.MockObject()
mock.mockMethod(facade._getConaryClient, client)
client.getRepos().getTroveLatestByLabel._mock.setReturn(
{'foo': {'foover': ['flav1', 'flav2']},
'foo:runtime': {'foover': ['flav1', 'flav2']},
'bar': {'barver': ['flav3']},
'group-baz': {'bazver': ['flav4']},
},
{None: {versions.Label('localhost@rpl:devel'): [None]}})
# Defaults
packages = facade.getLatestPackagesOnLabel('localhost@rpl:devel')
self.failUnlessEqual(sorted(packages), [
('bar', 'barver', 'flav3'),
('foo', 'foover', 'flav1'),
('foo', 'foover', 'flav2'),
])
# With components
packages = facade.getLatestPackagesOnLabel('localhost@rpl:devel',
keepComponents=True)
self.failUnlessEqual(sorted(packages), [
('bar', 'barver', 'flav3'),
('foo', 'foover', 'flav1'),
('foo', 'foover', 'flav2'),
('foo:runtime', 'foover', 'flav1'),
('foo:runtime', 'foover', 'flav2'),
])
# With groups
packages = facade.getLatestPackagesOnLabel('localhost@rpl:devel',
keepGroups=True)
self.failUnlessEqual(sorted(packages), [
('bar', 'barver', 'flav3'),
('foo', 'foover', 'flav1'),
('foo', 'foover', 'flav2'),
('group-baz', 'bazver', 'flav4'),
])
def testFlavorNames(self):
handle, facade = self.prep()
# test prefers vs. requires
flvs = ('~flv1 is: x86', 'flv1 is: x86')
res = facade._getShortFlavorDescriptors(flvs)
self.assertEquals(res, {'flv1 is: x86': 'flv1 is: x86',
'~flv1 is: x86': '~flv1 is: x86'})
# test prefers not vs requires not
flvs = ('~!flv1 is: x86(test)', '!flv1 is: x86(test)', 'is: x86')
res = facade._getShortFlavorDescriptors(flvs)
self.assertEquals(res, {'~!flv1 is: x86(test)': '~!flv1 is: x86',
'!flv1 is: x86(test)': '!flv1 is: x86',
'is: x86': 'is: x86'})
# this worked all along
flvs = ('flv1, flv2 is: x86', 'flv1 is: x86')
res = facade._getShortFlavorDescriptors(flvs)
self.assertEquals(res, {'flv1,flv2 is: x86': 'x86-flv2',
'flv1 is: x86': 'x86'})
# this mixed flavors
flvs = ('flv1, flv2 is: x86', '~flv1, !flv2 is: x86')
res = facade._getShortFlavorDescriptors(flvs)
self.assertEquals(res, {'flv1,flv2 is: x86': 'x86-flv2',
'~flv1,!flv2 is: x86': 'x86'})
def testGetAllLabelsFromTroves(self):
handle, facade = self.prep()
mock.mock(facade, '_findTrovesFlattened')
specs = [('group-foo-appliance', 'foo@foo:foo', None)]
tups = [('group-foo-appliance', '/foo@foo:foo/1.2-3-4', 'is: x86')]
facade._findTrovesFlattened._mock.setReturn(tups, specs)
subTups = [mock.MockObject(), mock.MockObject()]
subTups[0][1].trailingLabel().asString._mock.setReturn('foo@foo:foo-bar')
subTups[1][1].trailingLabel().asString._mock.setReturn('foo@foo:foo-baz')
troves = [mock.MockObject()]
troves[0].iterTroveList._mock.setReturn(subTups,
strongRefs=True, weakRefs=True)
troves[0].getVersion().trailingLabel().asString._mock.setReturn('foo@foo:foo')
mock.mock(facade, '_getRepositoryClient')
facade._getRepositoryClient().getTroves._mock.setReturn(troves, tups,
withFiles=False)
self.assertEquals(facade.getAllLabelsFromTroves(specs),
set(['foo@foo:foo', 'foo@foo:foo-bar', 'foo@foo:foo-baz']))
class QuietUpdateTest(rbuildhelp.RbuildHelper):
def testQuietUpdateCallback(self):
callback = conaryfacade._QuietUpdateCallback()
callback.setUpdateJob()
| {
"content_hash": "3106ed76bed3ce6c2cdb0144d8659889",
"timestamp": "",
"source": "github",
"line_count": 1065,
"max_line_length": 91,
"avg_line_length": 44.15023474178404,
"alnum_prop": 0.612292641429179,
"repo_name": "sassoftware/rbuild",
"id": "e79f86c4d3168f1fdc698f5f91eaedcd70cb4947",
"size": "47625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rbuild_test/unit_test/facadetest/conaryfacadetest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "355"
},
{
"name": "Makefile",
"bytes": "17287"
},
{
"name": "Python",
"bytes": "997556"
},
{
"name": "Shell",
"bytes": "4530"
}
],
"symlink_target": ""
} |
"""Fichier contenant le module primaire salle."""
from math import sqrt
import re
from random import random, randint
from datetime import datetime
from abstraits.module import *
from primaires.format.fonctions import format_nb, supprimer_accents
from primaires.vehicule.vecteur import Vecteur
from .bonhomme_neige import *
from .config import cfg_salle
from .coordonnees import Coordonnees
from .decor import PrototypeDecor
from .etendue import Etendue
from .obstacle import Obstacle
from .porte import Porte
from .salle import Salle, ZONE_VALIDE, MNEMONIC_VALIDE
from .feu import Feu
from .sortie import Sortie
from .sorties import NOMS_SORTIES
from .zone import Zone
from .templates.terrain import Terrain
from .editeurs.aedit import EdtAedit
from .editeurs.decedit import EdtDecedit
from .editeurs.redit import EdtRedit
from .editeurs.sbedit import EdtSbedit
from .editeurs.zedit import EdtZedit
from . import cherchables
from . import commandes
from . import masques
from . import types
# Constantes
NB_MIN_NETTOYAGE = 20
NB_TICKS = 20
class Module(BaseModule):
"""Classe utilisée pour gérer des salles.
Dans la terminologie des MUDs, les salles sont des "cases" avec une
description et une liste de sorties possibles, que le joueur peut
emprunter. L'ensemble des salles consiste l'univers, auquel il faut
naturellement rajouter des PNJ et objets pour qu'il soit riche un minimum.
Pour plus d'informations, consultez le fichier
src/primaires/salle/salle.py contenant la classe Salle.
"""
def __init__(self, importeur):
"""Constructeur du module"""
BaseModule.__init__(self, importeur, "salle", "primaire")
self._salles = {} # ident:salle
self.feux = {} # ident:feu
self._zones = {} # cle:zone
self._coords = {} # coordonnee:salle
self.commandes = []
self.salle_arrivee = ""
self.salle_retour = ""
self.p_nettoyer = True
self.aliases = {
"e": "est",
"se": "sud-est",
"s": "sud",
"so": "sud-ouest",
"o": "ouest",
"no": "nord-ouest",
"n": "nord",
"ne": "nord-est",
"b": "bas",
"h": "haut",
"s-e": "sud-est",
"s-o": "sud-ouest",
"n-o": "nord-ouest",
"n-e": "nord-est",
}
self.logger = importeur.man_logs.creer_logger( \
"salles", "salles")
self.terrains = {}
self.etendues = {}
self.obstacles = {}
self.ch_minute = []
self.ch_heure = []
self.ch_jour = []
self.ch_mois = []
self.ch_annee = []
# Liste des méthodes ajoutant des salles éventuelles à cartographier
# Par exemple, un éventuel module secondaire de navigation ajoute à
# cette liste une fonction retournant les bateaux. Cette fonction
# doit impérativement retourner une liste de salles sous la forme
# d'un tuple (nom, interieur, (x, y)) (interieur est un booléen).
self.salles_a_cartographier = []
self.graph = {}
self.details_dynamiques = []
self.decors = {}
self.bonhommes_neige = {}
self.a_renouveler = {}
self.magasins_a_ouvrir = {}
self.magasins_a_fermer = {}
# Constantes
self.TERRAINS_SANS_FEU = ("ville", "désert", "route", "aquatique",
"subaquatique", "rive")
# Ticks
self.ticks = {}
for no in range(1, NB_TICKS + 1):
self.ticks[no] = []
@property
def salles(self):
"""Retourne un dictionnaire déréférencé des salles."""
return dict(self._salles)
@property
def zones(self):
"""Retourne un dictionnaire déréférencé des zones."""
return dict(self._zones)
def config(self):
"""Méthode de configuration du module"""
importeur.anaconf.get_config("salle", \
"salle/salle.cfg", "config salle", cfg_salle)
importeur.hook.ajouter_hook("salle:regarder",
"Hook appelé dès qu'on regarde une salle.")
importeur.hook.ajouter_hook("salle:trouver_chemins_droits",
"Hook appelé quand on recherche les chemins droits " \
"d'une salle.")
# Ajout des terrain
self.ajouter_terrain("ville", "quelques maisons")
self.ajouter_terrain("route", "une route")
self.ajouter_terrain("forêt", "des forêts denses")
self.ajouter_terrain("plaine", "des plaines verdoyantes")
self.ajouter_terrain("rive", "une rive basse")
des = self.ajouter_terrain("désert", "des terres désertiques")
des.perte_endurance_dep = 4
self.ajouter_terrain("caverne", "une muraille de roches")
self.ajouter_terrain("aquatique", "des terres flottantes")
self.ajouter_terrain("subaquatique", "des terres sous-marines")
self.ajouter_terrain("quai de bois", "des quais de bois")
self.ajouter_terrain("quai de pierre", "des quais de pierre")
self.ajouter_terrain("falaise", "de hautes falaises")
self.ajouter_terrain("montagne", "de hautes montagnes")
self.ajouter_terrain("plage de sable blanc",
"des plages de sable blanc")
self.ajouter_terrain("plage de sable noir",
"des plages de sable noir")
self.ajouter_terrain("rocher", "un rocher à demi immergé")
self.ajouter_terrain("rempart", "un haut mur fortifié")
self.ajouter_terrain("récif", "une ligne de récifs")
self.ajouter_terrain("rapide", "de dangereux rapides")
self.ajouter_terrain("banc de sable",
"un banc de sable à demi immergé")
self.ajouter_terrain("corail", "une barrière de corail")
# On ajoute les niveaux
importeur.perso.ajouter_niveau("survie", "survie")
# On ajoute de l'état
etat = importeur.perso.ajouter_etat("collecte_bois")
etat.msg_refus = "Vous êtes en train de ramasser du bois."
etat.msg_visible = "ramasse du bois"
etat.act_autorisees = ["regarder", "parler"]
etat = importeur.perso.ajouter_etat("bonhomme_neige")
etat.msg_refus = "Vous êtes en train de fabriquer un bonhomme de neige."
etat.msg_visible = "fabrique un bonhomme de neige"
etat.act_autorisees = ["regarder", "parler"]
BaseModule.config(self)
def init(self):
"""Méthode d'initialisation du module"""
# On récupère les salles
salles = importeur.supenr.charger_groupe(Salle)
for salle in salles:
self.ajouter_salle(salle)
nb_salles = len(self._salles)
self.logger.info(format_nb(nb_salles, "{nb} salle{s} récupérée{s}", \
fem=True))
# On récupère les étendues
etendues = self.importeur.supenr.charger_groupe(Etendue)
for etendue in etendues:
self.ajouter_etendue(etendue)
nb_etendues = len(self.etendues)
self.logger.info(format_nb(nb_etendues, "{nb} étendue{s} " \
"d'eau{x} récupérée{s}", fem=True))
# On récupère les obstacles
obstacles = self.importeur.supenr.charger_groupe(Obstacle)
for obstacle in obstacles:
self.ajouter_obstacle(obstacle)
# On récupère les décors
decors = importeur.supenr.charger_groupe(PrototypeDecor)
for decor in decors:
self.ajouter_decor(decor)
nb_decors = len(self.decors)
self.logger.info(format_nb(nb_decors, "{nb} décor{s} récupéré{s}"))
# On récupère les bonhommes de neige
bonhommes = importeur.supenr.charger_groupe(PrototypeBonhommeNeige)
for bonhomme in bonhommes:
self.ajouter_bonhomme_neige(bonhomme)
nb_bonhommes = len(self.bonhommes_neige)
self.logger.info(format_nb(nb_bonhommes, "{nb} prototype{s} " \
"de bonhomme de neige récupéré{s}"))
# On récupère les feux
feux = importeur.supenr.charger_groupe(Feu)
for feu in feux:
self.feux[feu.salle.ident] = feu
# On implémente le hook correspondant
self.importeur.hook["salle:regarder"].ajouter_evenement(
self.feu_present)
# On récupère les zones
zones = importeur.supenr.charger_groupe(Zone)
for zone in zones:
self._zones[zone.cle] = zone
nb_zones = len(self._zones)
self.logger.info(format_nb(nb_zones, "{nb} zone{s} récupérée{s}", \
fem=True))
importeur.diffact.ajouter_action("net_salles", 300,
self.nettoyer_salles)
importeur.diffact.ajouter_action("repop_salles", 900,
self.repop_salles)
importeur.diffact.ajouter_action("repop_feux", 5, Feu.repop)
# On ajoute les talents
importeur.perso.ajouter_talent("collecte_bois", "collecte de bois",
"survie", 0.55)
importeur.perso.ajouter_talent("feu_camp", "feu de camp", "survie",
0.23)
# Ajout des actions différées pour chaque tick
intervalle = 60 / NB_TICKS
for no in self.ticks.keys():
self.importeur.diffact.ajouter_action("stick_{}".format(no),
intervalle * no, self.tick, no)
# Ajout des hooks de changement de temps
self.importeur.hook["temps:minute"].ajouter_evenement(
self.changer_minute)
self.importeur.hook["temps:heure"].ajouter_evenement(
self.changer_heure)
self.importeur.hook["temps:jour"].ajouter_evenement(
self.changer_jour)
self.importeur.hook["temps:mois"].ajouter_evenement(
self.changer_mois)
self.importeur.hook["temps:annee"].ajouter_evenement(
self.changer_annee)
BaseModule.init(self)
def ajouter_commandes(self):
"""Ajout des commandes dans l'interpréteur"""
self.commandes = [
commandes.addroom.CmdAddroom(),
commandes.carte.CmdCarte(),
commandes.chercherbois.CmdChercherBois(),
commandes.chsortie.CmdChsortie(),
commandes.decor.CmdDecor(),
commandes.deverrouiller.CmdDeverrouiller(),
commandes.escalader.CmdEscalader(),
commandes.etendue.CmdEtendue(),
commandes.fermer.CmdFermer(),
commandes.goto.CmdGoto(),
commandes.mettrefeu.CmdMettreFeu(),
commandes.nager.CmdNager(),
commandes.neige.CmdNeige(),
commandes.ouvrir.CmdOuvrir(),
commandes.redit.CmdRedit(),
commandes.regarder.CmdRegarder(),
commandes.supsortie.CmdSupsortie(),
commandes.verrouiller.CmdVerrouiller(),
commandes.zone.CmdZone(),
]
for cmd in self.commandes:
importeur.interpreteur.ajouter_commande(cmd)
# Ajout des éditeurs 'decedit', 'redit' et 'zedit'
importeur.interpreteur.ajouter_editeur(EdtAedit)
importeur.interpreteur.ajouter_editeur(EdtDecedit)
importeur.interpreteur.ajouter_editeur(EdtRedit)
importeur.interpreteur.ajouter_editeur(EdtSbedit)
importeur.interpreteur.ajouter_editeur(EdtZedit)
def preparer(self):
"""Préparation du module.
On vérifie que :
- Les salles de retour et d'arrivée sont bien créés (sinon,
on les recrée)
- On recrée le lien entre sorties et salles
- Les personnages présents dans self._personnages soient
toujours là
- Chaque salle est dans une zone
- Chaque terrain a sa réciproque en obstacle
- Les étendues ont toutes un contour défini
- Les étendues détemrinent leurs segments de liens
"""
# On récupère la configuration
conf_salle = importeur.anaconf.get_config("salle")
salle_arrivee = conf_salle.salle_arrivee
salle_retour = conf_salle.salle_retour
if salle_arrivee not in self:
# On crée la salle d'arrivée
zone, mnemonic = salle_arrivee.split(":")
salle_arrivee = self.creer_salle(zone, mnemonic, valide=False)
salle_arrivee.titre = "La salle d'arrivée"
salle_arrivee = salle_arrivee.ident
if salle_retour not in self:
# On crée la salle de retour
zone, mnemonic = salle_retour.split(":")
salle_retour = self.creer_salle(zone, mnemonic, valide=False)
salle_retour.titre = "La salle de retour"
salle_retour = salle_retour.ident
self.salle_arrivee = salle_arrivee
self.salle_retour = salle_retour
# On prépare les sorties
for salle in self.salles.values():
if salle.magasin:
magasin = salle.magasin
if magasin.renouveler_ouverture:
liste = self.a_renouveler.get(magasin.ouverture, [])
liste.append(magasin)
self.a_renouveler[magasin.ouverture] = liste
if magasin.renouveler_fermeture:
liste = self.a_renouveler.get(magasin.fermeture, [])
liste.append(magasin)
self.a_renouveler[magasin.ouverture] = liste
liste = self.magasins_a_ouvrir.get(magasin.ouverture, [])
liste.append(magasin)
self.magasins_a_ouvrir[magasin.ouverture] = liste
liste = self.magasins_a_fermer.get(magasin.fermeture, [])
liste.append(magasin)
self.magasins_a_fermer[magasin.fermeture] = liste
for sortie in list(salle.sorties):
try:
salle_dest = self.salles[sortie.salle_dest]
except KeyError:
salle.sorties.supprimer_sortie(sortie.direction)
else:
if salle_dest is None or not salle_dest.e_existe:
salle.sorties.supprimer_sortie(sortie.direction)
else:
sortie.salle_dest = salle_dest
zone = salle.zone
zone.ajouter(salle)
for personnage in salle.personnages:
if personnage.salle is not salle:
salle.retirer_personnage(personnage)
# On ajoute les salles au renouvellement automatique
self.inscrire_salle(salle)
# On recrée les obstacles
for nom, terrain in self.terrains.items():
if nom not in self.obstacles:
self.creer_obstacle(terrain.nom, terrain.desc_survol)
# Ajout des affections
for affection in salle.affections.values():
affection.prevoir_tick()
# On parcour les étendues
for etendue in self.etendues.values():
x, y = etendue.origine
if x is not None and y is not None:
etendue.trouver_contour()
etendue.determiner_segments_liens()
def detruire(self):
"""Destruction du module.
* On détruit toutes les zones vides
"""
for zone in self._zones.values():
if not zone.salles:
zone.detruire()
def __len__(self):
"""Retourne le nombre de salles"""
return len(self._salles)
def __getitem__(self, cle):
"""Retourne la salle correspondante à la clé.
Celle-ci peut être de différents types :
* une chaîne : c'est l'identifiant 'zone:mnemonic'
* un objet Coordonnees
* un tuple représentant les coordonnées
"""
if type(cle) is str:
return self._salles[cle]
elif type(cle) is Coordonnees:
return self._coords[cle.tuple()]
elif type(cle) is tuple:
return self._coords[cle]
else:
raise TypeError("un type non traité sert d'identifiant " \
"({})".format(repr(cle)))
def __contains__(self, cle):
"""Retourne True si la clé se trouve dans l'un des dictionnaires de
salles. Voir la méthode __getitem__ pour connaître les types acceptés.
"""
if type(cle) is str:
return cle in self._salles.keys()
elif type(cle) is Coordonnees:
return cle.tuple() in self._coords.keys()
elif type(cle) is tuple:
return cle in self._coords.keys()
else:
raise TypeError("un type non traité sert d'identifiant " \
"({})".format(repr(cle)))
def ajouter_salle(self, salle):
"""Ajoute la salle aux deux dictionnaires
self._salles et self._coords.
"""
self._salles[salle.ident] = salle
if salle.coords.valide:
self._coords[salle.coords.tuple()] = salle
def creer_salle(self, zone, mnemonic, x=0, y=0, z=0, valide=True):
"""Permet de créer une salle"""
ident = zone + ":" + mnemonic
if ident in self._salles.keys():
raise ValueError("la salle {} existe déjà".format(ident))
if not re.search(ZONE_VALIDE, zone):
raise ValueError("Zone {} invalide".format(zone))
if not re.search(MNEMONIC_VALIDE, mnemonic):
raise ValueError("Mnémonique {} invalide ({})".format(mnemonic,
MNEMONIC_VALIDE))
salle = Salle(zone, mnemonic, x, y, z, valide)
salle.zone.ajouter(salle)
self.ajouter_salle(salle)
return salle
def supprimer_salle(self, cle):
"""Supprime la salle.
La clé est l'identifiant de la salle.
"""
salle = self._salles[cle]
coords = salle.coords
if coords.valide and coords.tuple() in self._coords.keys():
del self._coords[coords.tuple()]
del self._salles[cle]
salle.detruire()
def creer_decor(self, cle):
"""Créée un nouveau prototype de décor."""
cle = cle.lower()
if cle in self.decors:
raise ValueError("le décor {} existe déjà".format(repr(cle)))
decor = PrototypeDecor(cle)
self.ajouter_decor(decor)
return decor
def ajouter_decor(self, decor):
"""Ajoute un prototype de décor."""
if decor.cle in self.decors:
raise ValueError("le décor {} existe déjà".format(repr(decor.cle)))
self.decors[decor.cle] = decor
def supprimer_decor(self, cle):
"""Supprime un prototype de décor."""
if cle not in self.decors:
raise ValueError("le décor {} n'existe pas".format(repr(cle)))
decor = self.decors[cle]
del self.decors[cle]
decor.detruire()
def creer_bonhomme_neige(self, cle):
"""Créée un nouveau prototype de bonhomme de neige."""
cle = cle.lower()
if cle in self.bonhommes_neige or cle in self.decors:
raise ValueError("le bonhomme de neige {} existe déjà".format(
repr(cle)))
bonhomme = PrototypeBonhommeNeige(cle)
self.ajouter_bonhomme_neige(bonhomme)
return bonhomme
def ajouter_bonhomme_neige(self, bonhomme):
"""Ajoute un prototype de bonhomme de neige."""
if bonhomme.cle in self.bonhommes_neige:
raise ValueError("le bonhomme de neige {} existe déjà".format(
repr(bonhomme.cle)))
self.bonhommes_neige[bonhomme.cle] = bonhomme
self.decors[bonhomme.cle] = bonhomme
def supprimer_bonhomme_neige(self, cle):
"""Supprime un prototype de bonhomme de neige."""
if cle not in self.bonhommes_neige:
raise ValueError("le bonhomme de neige {} n'existe pas".format(
repr(cle)))
bonhomme = self.bonhommes_neige[cle]
del self.bonhommes_neige[cle]
bonhomme.detruire()
def traiter_commande(self, personnage, commande):
"""Traite les déplacements"""
# Si la commande est vide, on ne se déplace pas
if len(commande) == 0:
return False
commande = supprimer_accents(commande).lower()
salle = personnage.salle
try:
sortie = salle.sorties.get_sortie_par_nom(commande,
cachees=False)
except KeyError:
pass
else:
personnage.deplacer_vers(sortie.nom)
return True
for nom, sortie in salle.sorties.iter_couple():
if sortie and sortie.salle_dest:
nom = supprimer_accents(sortie.nom).lower()
if (sortie.cachee and nom == commande) or ( \
not sortie.cachee and nom.startswith(commande)):
personnage.deplacer_vers(sortie.nom)
return True
if commande in NOMS_SORTIES.keys():
personnage << "Vous ne pouvez aller par là..."
return True
return False
def changer_ident(self, ancien_ident, nouveau_ident):
"""Change l'identifiant d'une salle"""
salle = self._salles[ancien_ident]
del self._salles[ancien_ident]
self._salles[nouveau_ident] = salle
# On change la salle de zone si la zone est différente
a_zone = ancien_ident.split(":")[0]
n_zone = nouveau_ident.split(":")[0]
if a_zone != n_zone:
self.get_zone(a_zone).retirer(salle)
self.get_zone(n_zone).ajouter(salle)
def changer_coordonnees(self, ancien_tuple, nouvelles_coords):
"""Change les coordonnées d'une salle.
Les anciennes coordonnées sont données sous la forme d'un tuple
(x, y, z, valide)
Les nouvelles sont un objet Coordonnees.
"""
a_x, a_y, a_z, a_valide = ancien_tuple
salle = nouvelles_coords.parent
if a_valide and (a_x, a_y, a_z) in self._coords:
# on va supprimer les anciennes coordonnées
del self._coords[a_x, a_y, a_z]
if salle and nouvelles_coords.valide:
self._coords[nouvelles_coords.tuple()] = salle
def ajouter_terrain(self, nom, survol):
"""Ajoute un terrain."""
if nom in self.terrains:
raise KeyError("le terrain {] existe déjà".format(repr(nom)))
terrain = Terrain(nom, survol)
self.terrains[nom] = terrain
return terrain
def get_terrain(self, nom):
"""Retourne le terrain si trouvé.
La recherche se fait indépendemment des accents, majuscules et
minuscules. Si le terrain n'est pas trouvé, retourne None.
"""
nom = supprimer_accents(nom).lower()
for terrain in self.terrains.values():
if supprimer_accents(terrain.nom).lower() == nom:
return terrain
return None
def creer_etendue(self, cle):
"""Crée une étendue d'eau."""
if cle in self.etendues.keys():
raise KeyError("l'étendue d'eau {} existe déjà".format(cle))
etendue = Etendue(cle)
self.ajouter_etendue(etendue)
return etendue
def ajouter_etendue(self, etendue):
"""Ajoute une étendue au dictionnaire."""
if etendue.cle in self.etendues.keys():
raise KeyError("l'étendue d'eau {} existe déjà".format(
etendue.cle))
self.etendues[etendue.cle] = etendue
def supprimer_etendue(self, cle):
"""Supprime l'étendue d'eau."""
etendue = self.etendues[cle]
etendue.detruire()
del self.etendues[cle]
def creer_obstacle(self, *args, **kw_args):
"""Création d'un obstacle."""
obstacle = Obstacle(*args, **kw_args)
self.ajouter_obstacle(obstacle)
return obstacle
def ajouter_obstacle(self, obstacle):
"""Ajoute un obstacle dans le dictionnaire du module."""
if obstacle.nom in self.obstacles:
raise ValueError("l'obstacle {} existe déjà".format(obstacle.nom))
self.obstacles[obstacle.nom] = obstacle
def supprimer_obstacle(self, nom):
"""Détruit l'obstacle."""
obstacle = self.obstacles[nom]
obstacle.detruire()
del self.obstacles[nom]
def get_zone(self, cle):
"""Retourne la zone correspondante ou la crée."""
zone = self._zones.get(cle)
if zone is None:
zone = Zone(cle)
self._zones[cle] = zone
return zone
def nettoyer_salles(self):
"""Nettoyage des salles et des objets trop vieux."""
if not self.p_nettoyer:
return
importeur.diffact.ajouter_action("net_salles", 300,
self.nettoyer_salles)
maintenant = datetime.now()
for s in self.salles.values():
objets = [o for o in s.objets_sol._objets if o.nettoyer]
for o in objets:
if (maintenant - o.ajoute_a).seconds >= NB_MIN_NETTOYAGE * 60:
importeur.objet.supprimer_objet(o.identifiant)
def repop_salles(self):
"""Méthode chargée de repop les salles."""
importeur.diffact.ajouter_action("repop_salles", 900,
self.repop_salles)
for s in self.salles.values():
s.repop()
def allumer_feu(self, salle, puissance=10):
"""Allume un feu dans salle."""
if not salle.ident in self.feux:
feu = Feu(salle, puissance)
self.feux[salle.ident] = feu
else:
feu = salle.feux[salle.ident]
return feu
def eteindre_feu(self, salle):
"""Eteint un éventuel feu dans salle."""
if salle.ident in self.feux:
self.feux[salle.ident].detruire()
del self.feux[salle.ident]
def feu_present(self, salle, liste_messages, flags):
"""Si un feu se trouve dans la salle, on l'affiche"""
if self.feux:
for feu in self.feux.values():
if salle is feu.salle:
liste_messages.insert(0, str(feu))
return
def tick(self, no):
"""Exécute un tick."""
self.importeur.diffact.ajouter_action("stick_{}".format(no),
60, self.tick, no)
# On sélectionne les salles à tick
salles = list(self._salles.values())
tick = []
i = no - 1
while i < len(salles):
try:
s = salles[i]
except IndexError:
pass
else:
tick.append(s)
i += NB_TICKS
for s in tick:
s.tick()
def peut_allumer_feu(self, salle):
"""Retourne si on peut allumer un feu dans cette salle ou non."""
if not salle.a_detail_flag("cheminée") and (salle.interieur or \
salle.nom_terrain in self.TERRAINS_SANS_FEU):
return False
for affection in salle.affections.values():
if affection.affection.a_flag("humide"):
return False
return True
def allumer_ou_recharger(self, personnage, utiliser_pierre=True,
utiliser_niveau=True):
"""Allume ou recharge un feu."""
salle = personnage.salle
if "neige" in salle.affections:
personnage << "|err|Il fait trop humide.|ff|"
return
objets_sol = list(salle.objets_sol)
somme_combu = 0
for objet in list(objets_sol):
if objet.est_de_type("combustible"):
somme_combu += objet.qualite
if not somme_combu:
personnage << "|err|Il n'y a rien qui puisse brûler par ici.|ff|"
return
# On tente d'allumer ou de nourrir le feu
if salle.ident in self.feux:
feu = self.feux[salle.ident]
feu.puissance += somme_combu
personnage << "Vous poussez du bois dans le feu et celui-ci " \
"gagne en vigueur et en éclat."
for objet in objets_sol:
if objet.est_de_type("combustible"):
importeur.objet.supprimer_objet(objet.identifiant)
else:
if not self.peut_allumer_feu(salle):
personnage << "|err|Vous ne pouvez pas faire de feu ici.|ff|"
return
efficacite_pierre = 100
if utiliser_pierre:
pierre = None
for objet, qtt, t_conteneur in \
personnage.equipement.inventaire.iter_objets_qtt(
conteneur=True):
if objet.est_de_type("pierre à feu"):
pierre = objet
conteneur = t_conteneur
break
if not pierre:
personnage << "|err|Vous ne tenez rien pour allumer.|ff|"
return
efficacite_pierre = pierre.efficacite
if pierre.efficacite > 0:
pierre.efficacite -= 1
if utiliser_niveau:
personnage.pratiquer_talent("feu_camp")
niveau = sqrt(personnage.get_talent("feu_camp") / 100)
else:
niveau = 1
efficace = efficacite_pierre / 50
proba_marche = random()
# Si la pierre fonctionne
if proba_marche <= efficace:
proba_reussit = round(random(), 1)
if proba_reussit <= niveau:
personnage << "Une étincelle vole et le feu prend."
feu = importeur.salle.allumer_feu(salle, somme_combu)
personnage.gagner_xp("survie", somme_combu * 20)
for objet in objets_sol:
if objet.est_de_type("combustible"):
if objet.identifiant:
importeur.objet.supprimer_objet(
objet.identifiant)
feu.stabilite = 1 - niveau ** (1 / 3)
return
personnage << "Le feu refuse de démarrer."
proba_casse = random()
solidite = efficace ** (1 / 5)
if proba_casse >= solidite and utiliser_pierre:
personnage << "{} se brise en mille morceaux.".format(
pierre.nom_singulier)
conteneur.retirer(pierre)
importeur.objet.supprimer_objet(pierre.identifiant)
def inscrire_salle(self, salle):
"""Inscrit la salle dans le changement de temps."""
if salle.script["changer"]["minute"].tests:
if salle not in self.ch_minute:
self.ch_minute.append(salle)
elif salle in self.ch_minute:
self.ch_minute.remove(salle)
if salle.script["changer"]["heure"].tests:
if salle not in self.ch_heure:
self.ch_heure.append(salle)
elif salle in self.ch_heure:
self.ch_heure.remove(salle)
if salle.script["changer"]["jour"].tests:
if salle not in self.ch_jour:
self.ch_jour.append(salle)
elif salle in self.ch_jour:
self.ch_jour.remove(salle)
if salle.script["changer"]["mois"].tests:
if salle not in self.ch_mois:
self.ch_mois.append(salle)
elif salle in self.ch_mois:
self.ch_mois.remove(salle)
if salle.script["changer"]["année"].tests:
if salle not in self.ch_annee:
self.ch_annee.append(salle)
elif salle in self.ch_annee:
self.ch_annee.remove(salle)
def changer_minute(self, temps):
"""Hook appelé à chaque changement de minute."""
minute, heure, jour, mois, annee = temps.minute, temps.heure, \
temps.jour + 1, temps.mois + 1, temps.annee
for salle in self.ch_minute:
salle.script["changer"]["minute"].executer(salle=salle,
minute=minute, heure=heure, jour=jour, mois=mois,
annee=annee)
def changer_heure(self, temps):
"""Hook appelé à chaque changement d'heure."""
minute, heure, jour, mois, annee = temps.minute, temps.heure, \
temps.jour + 1, temps.mois + 1, temps.annee
for salle in self.ch_heure:
salle.script["changer"]["heure"].executer(salle=salle,
minute=minute, heure=heure, jour=jour, mois=mois,
annee=annee)
def changer_jour(self, temps):
"""Hook appelé à chaque changement de jour."""
minute, heure, jour, mois, annee = temps.minute, temps.heure, \
temps.jour + 1, temps.mois + 1, temps.annee
for salle in self.ch_jour:
salle.script["changer"]["jour"].executer(salle=salle,
minute=minute, heure=heure, jour=jour, mois=mois,
annee=annee)
def changer_mois(self, temps):
"""Hook appelé à chaque changement de mois."""
minute, heure, jour, mois, annee = temps.minute, temps.heure, \
temps.jour + 1, temps.mois + 1, temps.annee
for salle in self.ch_mois:
salle.script["changer"]["mois"].executer(salle=salle,
minute=minute, heure=heure, jour=jour, mois=mois,
annee=annee)
def changer_annee(self, temps):
"""Hook appelé à chaque changement d'année."""
minute, heure, jour, mois, annee = temps.minute, temps.heure, \
temps.jour + 1, temps.mois + 1, temps.annee
for salle in self.ch_annee:
salle.script["changer"]["annee"].executer(salle=salle,
minute=minute, heure=heure, jour=jour, mois=mois,
annee=annee)
| {
"content_hash": "e7850dbbb3c543dcfe446cca1c9fa09e",
"timestamp": "",
"source": "github",
"line_count": 905,
"max_line_length": 80,
"avg_line_length": 37.392265193370164,
"alnum_prop": 0.5745271867612293,
"repo_name": "stormi/tsunami",
"id": "58c26b13e2ee0045c7229ccd43a64eee71367695",
"size": "35590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/salle/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""y/Page is a enterprise-grade y/OS runtime""" | {
"content_hash": "9ee734b9ddd3a089f96ebbe318942d55",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 47,
"alnum_prop": 0.7021276595744681,
"repo_name": "piotrmaslanka/systemy",
"id": "0f8949fcc74c4e96b8a62ad027b650f06f151b20",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ypage/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54086"
}
],
"symlink_target": ""
} |
import logging
from appengine_django.db.base import DatabaseWrapper
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
def create_test_db(*args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
destroy_test_db()
# Ensure the new store that is created uses the test datastore.
from django.db import connection
connection.use_test_datastore = True
connection.flush()
def destroy_test_db(*args, **kw):
"""Destroys the test datastore files."""
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
| {
"content_hash": "6a4e6a4309982073c4ab4440c0d05b64",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 32.55,
"alnum_prop": 0.7572964669738863,
"repo_name": "pfeilbr/qik",
"id": "18e669a17d4d904f17fc5a6f0b51ab186a145da3",
"size": "1251",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "appengine_django/db/creation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "3076916"
},
{
"name": "Python",
"bytes": "114942"
}
],
"symlink_target": ""
} |
__author__ = 'Dima Potekihin'
class DotNetSystemLib(object):
def __init__(self, name, framework):
self._name = name
self._framework = framework
@property
def name(self):
return self._name
@property
def framework(self):
return self._framework
| {
"content_hash": "43ace19f073e59444fa882b7c7595b12",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 19.866666666666667,
"alnum_prop": 0.6006711409395973,
"repo_name": "open-epicycle/epicycle.derkonfigurator-py",
"id": "5d125a378762062d2471f78b19f5b9da251e5ede",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/epicycle.derkonfigurator-py/epicycle/derkonfigurator/externals/DotNetSystemLib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "705"
},
{
"name": "C#",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "54738"
}
],
"symlink_target": ""
} |
import pytest
from unittest import mock
import io
import json
import time
import hashlib
import furl
import aiohttp
import aiohttp.multidict
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.cloudfiles import settings
from waterbutler.providers.cloudfiles import CloudFilesProvider
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': '[email protected]',
}
@pytest.fixture
def credentials():
return {
'username': 'prince',
'token': 'revolutionary',
'region': 'iad',
}
@pytest.fixture
def settings():
return {'container': 'purple rain'}
@pytest.fixture
def provider(auth, credentials, settings):
return CloudFilesProvider(auth, credentials, settings)
@pytest.fixture
def auth_json():
return {
"access": {
"serviceCatalog": [
{
"name": "cloudFiles",
"type": "object-store",
"endpoints": [
{
"publicURL": "https://storage101.iad3.clouddrive.com/v1/MossoCloudFS_926294",
"internalURL": "https://snet-storage101.iad3.clouddrive.com/v1/MossoCloudFS_926294",
"region": "IAD",
"tenantId": "MossoCloudFS_926294"
},
]
}
],
"token": {
"RAX-AUTH:authenticatedBy": [
"APIKEY"
],
"tenant": {
"name": "926294",
"id": "926294"
},
"id": "2322f6b2322f4dbfa69802baf50b0832",
"expires": "2014-12-17T09:12:26.069Z"
},
"user": {
"name": "osf-production",
"roles": [
{
"name": "object-store:admin",
"id": "10000256",
"description": "Object Store Admin Role for Account User"
},
{
"name": "compute:default",
"description": "A Role that allows a user access to keystone Service methods",
"id": "6",
"tenantId": "926294"
},
{
"name": "object-store:default",
"description": "A Role that allows a user access to keystone Service methods",
"id": "5",
"tenantId": "MossoCloudFS_926294"
},
{
"name": "identity:default",
"id": "2",
"description": "Default Role."
}
],
"id": "secret",
"RAX-AUTH:defaultRegion": "IAD"
}
}
}
@pytest.fixture
def token(auth_json):
return auth_json['access']['token']['id']
@pytest.fixture
def endpoint(auth_json):
return auth_json['access']['serviceCatalog'][0]['endpoints'][0]['publicURL']
@pytest.fixture
def temp_url_key():
return 'temporary beret'
@pytest.fixture
def mock_auth(auth_json):
aiohttpretty.register_json_uri(
'POST',
settings.AUTH_URL,
body=auth_json,
)
@pytest.fixture
def mock_temp_key(endpoint, temp_url_key):
aiohttpretty.register_uri(
'HEAD',
endpoint,
status=204,
headers={'X-Account-Meta-Temp-URL-Key': temp_url_key},
)
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock()
mock_time.return_value = 10
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def connected_provider(provider, token, endpoint, temp_url_key, mock_time):
provider.token = token
provider.endpoint = endpoint
provider.temp_url_key = temp_url_key.encode()
return provider
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def file_metadata():
return aiohttp.multidict.CIMultiDict([
('LAST-MODIFIED', 'Thu, 25 Dec 2014 02:54:35 GMT'),
('CONTENT-LENGTH', '0'),
('ETAG', 'edfa12d00b779b4b37b81fe5b61b2b3f'),
('CONTENT-TYPE', 'text/html; charset=UTF-8'),
('X-TRANS-ID', 'txf876a4b088e3451d94442-00549b7c6aiad3'),
('DATE', 'Thu, 25 Dec 2014 02:54:34 GMT')
])
# Metadata Test Scenarios
# / (folder_root_empty)
# / (folder_root)
# /level1/ (folder_root_level1)
# /level1/level2/ (folder_root_level1_level2)
# /level1/level2/file2.file - (file_root_level1_level2_file2_txt)
# /level1_empty/ (folder_root_level1_empty)
# /similar (file_similar)
# /similar.name (file_similar_name)
# /does_not_exist (404)
# /does_not_exist/ (404)
@pytest.fixture
def folder_root_empty():
return []
@pytest.fixture
def folder_root():
return [
{
'last_modified': '2014-12-19T22:08:23.006360',
'content_type': 'application/directory',
'hash': 'd41d8cd98f00b204e9800998ecf8427e',
'name': 'level1',
'bytes': 0
},
{
'subdir': 'level1/'
},
{
'last_modified': '2014-12-19T23:22:23.232240',
'content_type': 'application/x-www-form-urlencoded;charset=utf-8',
'hash': 'edfa12d00b779b4b37b81fe5b61b2b3f',
'name': 'similar',
'bytes': 190
},
{
'last_modified': '2014-12-19T23:22:14.728640',
'content_type': 'application/x-www-form-urlencoded;charset=utf-8',
'hash': 'edfa12d00b779b4b37b81fe5b61b2b3f',
'name': 'similar.file',
'bytes': 190
},
{
'last_modified': '2014-12-19T23:20:16.718860',
'content_type': 'application/directory',
'hash': 'd41d8cd98f00b204e9800998ecf8427e',
'name': 'level1_empty',
'bytes': 0
}
]
@pytest.fixture
def folder_root_level1():
return [
{
'last_modified': '2014-12-19T22:08:26.958830',
'content_type': 'application/directory',
'hash': 'd41d8cd98f00b204e9800998ecf8427e',
'name': 'level1/level2',
'bytes': 0
},
{
'subdir': 'level1/level2/'
}
]
@pytest.fixture
def folder_root_level1_level2():
return [
{
'name': 'level1/level2/file2.txt',
'content_type': 'application/x-www-form-urlencoded;charset=utf-8',
'last_modified': '2014-12-19T23:25:22.497420',
'bytes': 1365336,
'hash': 'ebc8cdd3f712fd39476fb921d43aca1a'
}
]
@pytest.fixture
def file_root_level1_level2_file2_txt():
return aiohttp.multidict.CIMultiDict([
('ORIGIN', 'https://mycloud.rackspace.com'),
('CONTENT-LENGTH', '216945'),
('ACCEPT-RANGES', 'bytes'),
('LAST-MODIFIED', 'Mon, 22 Dec 2014 19:01:02 GMT'),
('ETAG', '44325d4f13b09f3769ede09d7c20a82c'),
('X-TIMESTAMP', '1419274861.04433'),
('CONTENT-TYPE', 'text/plain'),
('X-TRANS-ID', 'tx836375d817a34b558756a-0054987deeiad3'),
('DATE', 'Mon, 22 Dec 2014 20:24:14 GMT')
])
@pytest.fixture
def folder_root_level1_empty():
return aiohttp.multidict.CIMultiDict([
('ORIGIN', 'https://mycloud.rackspace.com'),
('CONTENT-LENGTH', '0'),
('ACCEPT-RANGES', 'bytes'),
('LAST-MODIFIED', 'Mon, 22 Dec 2014 18:58:56 GMT'),
('ETAG', 'd41d8cd98f00b204e9800998ecf8427e'),
('X-TIMESTAMP', '1419274735.03160'),
('CONTENT-TYPE', 'application/directory'),
('X-TRANS-ID', 'txd78273e328fc4ba3a98e3-0054987eeeiad3'),
('DATE', 'Mon, 22 Dec 2014 20:28:30 GMT')
])
@pytest.fixture
def file_root_similar():
return aiohttp.multidict.CIMultiDict([
('ORIGIN', 'https://mycloud.rackspace.com'),
('CONTENT-LENGTH', '190'),
('ACCEPT-RANGES', 'bytes'),
('LAST-MODIFIED', 'Fri, 19 Dec 2014 23:22:24 GMT'),
('ETAG', 'edfa12d00b779b4b37b81fe5b61b2b3f'),
('X-TIMESTAMP', '1419031343.23224'),
('CONTENT-TYPE', 'application/x-www-form-urlencoded;charset=utf-8'),
('X-TRANS-ID', 'tx7cfeef941f244807aec37-005498754diad3'),
('DATE', 'Mon, 22 Dec 2014 19:47:25 GMT')
])
@pytest.fixture
def file_root_similar_name():
return aiohttp.multidict.CIMultiDict([
('ORIGIN', 'https://mycloud.rackspace.com'),
('CONTENT-LENGTH', '190'),
('ACCEPT-RANGES', 'bytes'),
('LAST-MODIFIED', 'Mon, 22 Dec 2014 19:07:12 GMT'),
('ETAG', 'edfa12d00b779b4b37b81fe5b61b2b3f'),
('X-TIMESTAMP', '1419275231.66160'),
('CONTENT-TYPE', 'application/x-www-form-urlencoded;charset=utf-8'),
('X-TRANS-ID', 'tx438cbb32b5344d63b267c-0054987f3biad3'),
('DATE', 'Mon, 22 Dec 2014 20:29:47 GMT')
])
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download(self, connected_provider):
body = b'dearly-beloved'
path = WaterButlerPath('/lets-go-crazy')
url = connected_provider.sign_url(path)
aiohttpretty.register_uri('GET', url, body=body, auto_length=True)
result = await connected_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_accept_url(self, connected_provider):
body = b'dearly-beloved'
path = WaterButlerPath('/lets-go-crazy')
url = connected_provider.sign_url(path)
parsed_url = furl.furl(url)
parsed_url.args['filename'] = 'lets-go-crazy'
result = await connected_provider.download(path, accept_url=True)
assert result == parsed_url.url
aiohttpretty.register_uri('GET', url, body=body)
response = await aiohttp.request('GET', url)
content = await response.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_not_found(self, connected_provider):
path = WaterButlerPath('/lets-go-crazy')
url = connected_provider.sign_url(path)
aiohttpretty.register_uri('GET', url, status=404)
with pytest.raises(exceptions.DownloadError):
await connected_provider.download(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload(self, connected_provider, file_content, file_stream, file_metadata):
path = WaterButlerPath('/foo.bar')
content_md5 = hashlib.md5(file_content).hexdigest()
metadata_url = connected_provider.build_url(path.path)
url = connected_provider.sign_url(path, 'PUT')
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': file_metadata},
]
)
aiohttpretty.register_uri('PUT', url, status=200, headers={'ETag': '"{}"'.format(content_md5)})
metadata, created = await connected_provider.upload(file_stream, path)
assert created is True
assert metadata.kind == 'file'
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete(self, connected_provider):
path = WaterButlerPath('/delete.file')
url = connected_provider.build_url(path.path)
aiohttpretty.register_uri('DELETE', url, status=204)
await connected_provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=url)
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_root_empty(self, connected_provider, folder_root_empty):
path = WaterButlerPath('/')
body = json.dumps(folder_root_empty).encode('utf-8')
url = connected_provider.build_url(path.path, prefix=path.path, delimiter='/')
aiohttpretty.register_uri('GET', url, status=200, body=body)
result = await connected_provider.metadata(path)
assert len(result) == 0
assert result == []
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_root(self, connected_provider, folder_root):
path = WaterButlerPath('/')
body = json.dumps(folder_root).encode('utf-8')
url = connected_provider.build_url('', prefix=path.path, delimiter='/')
aiohttpretty.register_uri('GET', url, status=200, body=body)
result = await connected_provider.metadata(path)
assert len(result) == 4
assert result[0].name == 'level1'
assert result[0].path == '/level1/'
assert result[0].kind == 'folder'
assert result[1].name == 'similar'
assert result[1].path == '/similar'
assert result[1].kind == 'file'
assert result[2].name == 'similar.file'
assert result[2].path == '/similar.file'
assert result[2].kind == 'file'
assert result[3].name == 'level1_empty'
assert result[3].path == '/level1_empty/'
assert result[3].kind == 'folder'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_root_level1(self, connected_provider, folder_root_level1):
path = WaterButlerPath('/level1/')
body = json.dumps(folder_root_level1).encode('utf-8')
url = connected_provider.build_url('', prefix=path.path, delimiter='/')
aiohttpretty.register_uri('GET', url, status=200, body=body)
result = await connected_provider.metadata(path)
assert len(result) == 1
assert result[0].name == 'level2'
assert result[0].path == '/level1/level2/'
assert result[0].kind == 'folder'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_root_level1_level2(self, connected_provider, folder_root_level1_level2):
path = WaterButlerPath('/level1/level2/')
body = json.dumps(folder_root_level1_level2).encode('utf-8')
url = connected_provider.build_url('', prefix=path.path, delimiter='/')
aiohttpretty.register_uri('GET', url, status=200, body=body)
result = await connected_provider.metadata(path)
assert len(result) == 1
assert result[0].name == 'file2.txt'
assert result[0].path == '/level1/level2/file2.txt'
assert result[0].kind == 'file'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_root_level1_level2_file2_txt(self, connected_provider, file_root_level1_level2_file2_txt):
path = WaterButlerPath('/level1/level2/file2.txt')
url = connected_provider.build_url(path.path)
aiohttpretty.register_uri('HEAD', url, status=200, headers=file_root_level1_level2_file2_txt)
result = await connected_provider.metadata(path)
assert result.name == 'file2.txt'
assert result.path == '/level1/level2/file2.txt'
assert result.kind == 'file'
assert result.content_type == 'text/plain'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_root_level1_empty(self, connected_provider, folder_root_level1_empty):
path = WaterButlerPath('/level1_empty/')
folder_url = connected_provider.build_url('', prefix=path.path, delimiter='/')
folder_body = json.dumps([]).encode('utf-8')
file_url = connected_provider.build_url(path.path.rstrip('/'))
aiohttpretty.register_uri('GET', folder_url, status=200, body=folder_body)
aiohttpretty.register_uri('HEAD', file_url, status=200, headers=folder_root_level1_empty)
result = await connected_provider.metadata(path)
assert result == []
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_root_similar(self, connected_provider, file_root_similar):
path = WaterButlerPath('/similar')
url = connected_provider.build_url(path.path)
aiohttpretty.register_uri('HEAD', url, status=200, headers=file_root_similar)
result = await connected_provider.metadata(path)
assert result.name == 'similar'
assert result.path == '/similar'
assert result.kind == 'file'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_root_similar_name(self, connected_provider, file_root_similar_name):
path = WaterButlerPath('/similar.file')
url = connected_provider.build_url(path.path)
aiohttpretty.register_uri('HEAD', url, status=200, headers=file_root_similar_name)
result = await connected_provider.metadata(path)
assert result.name == 'similar.file'
assert result.path == '/similar.file'
assert result.kind == 'file'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_does_not_exist(self, connected_provider):
path = WaterButlerPath('/does_not.exist')
url = connected_provider.build_url(path.path)
aiohttpretty.register_uri('HEAD', url, status=404)
with pytest.raises(exceptions.MetadataError):
await connected_provider.metadata(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_does_not_exist(self, connected_provider):
path = WaterButlerPath('/does_not_exist/')
folder_url = connected_provider.build_url('', prefix=path.path, delimiter='/')
folder_body = json.dumps([]).encode('utf-8')
file_url = connected_provider.build_url(path.path.rstrip('/'))
aiohttpretty.register_uri('GET', folder_url, status=200, body=folder_body)
aiohttpretty.register_uri('HEAD', file_url, status=404)
with pytest.raises(exceptions.MetadataError):
await connected_provider.metadata(path)
class TestOperations:
async def test_can_intra_copy(self, connected_provider):
assert connected_provider.can_intra_copy(connected_provider)
async def test_can_intra_move(self, connected_provider):
assert connected_provider.can_intra_move(connected_provider)
| {
"content_hash": "4aac8afcd20b609352f2f67a6b5e0753",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 123,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.5980938102377382,
"repo_name": "chrisseto/waterbutler",
"id": "b6b3d14deb334c0208be81d9c63d08d300206b8f",
"size": "18676",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/providers/cloudfiles/test_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "590943"
}
],
"symlink_target": ""
} |
import os
import pprint
import json
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import urlfetch
from google.appengine.ext import db
pprint.pprint(os.environ.copy())
from github.models import GithubWebhook
for i in range(10):
dictionnary = {
'repository': {
'clone_url': 'http://github.com/ThomasMarcel/selection-naturelle.git',
},
'head_commit': {
'author': {'name': 'ThomasMarcel'},
'message': 'Dummy commit message %i' % i,
'url': 'https://github.com/ThomasMarcel/selection-naturelle/commit/344c64c6c40a8abeacfa0a8fb2315789c7cd25df',
},
}
hook = GithubWebhook()
hook.event = 'push'
hook.content = json.dumps(dictionnary)
hook.put()
| {
"content_hash": "c3ab92c4ed50d86f16c0e1adb1d396be",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 121,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.6616915422885572,
"repo_name": "ThomasMarcel/selection-naturelle",
"id": "d1e4c5428c43050613c88e69d79bee0daebac34f",
"size": "832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/scripts/populating_githubwebhook.gae.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "891"
},
{
"name": "Python",
"bytes": "58024"
},
{
"name": "Shell",
"bytes": "3593"
}
],
"symlink_target": ""
} |
"""Matplotlib style system.
See the main style user's guide for more information.
"""
__version__ = "$Revision: #1 $"
#===========================================================================
from .MplStyle import MplStyle
from .MplSubStyle import MplSubStyle
from .MplStyleManager import MplStyleManager
# Sub-Styles
from .MplArtistStyle import MplArtistStyle
from .MplAxesStyle import MplAxesStyle
from .MplAxisStyle import MplAxisStyle
from .MplBasicLineStyle import MplBasicLineStyle
from .MplFigureStyle import MplFigureStyle
from .MplFontStyle import MplFontStyle
from .MplLineStyle import MplLineStyle
from .MplMarkerStyle import MplMarkerStyle
from .MplPatchStyle import MplPatchStyle
from .MplTextStyle import MplTextStyle
from .MplTickStyle import MplTickStyle
from . import types
#===========================================================================
# Create a default Matplotlib Style Manager
mgr = MplStyleManager()
| {
"content_hash": "c74d39f3cd139c534dc721204c76d979",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 29.59375,
"alnum_prop": 0.6906019007391764,
"repo_name": "nasa/mplStyle",
"id": "64647dbceb2ad71ed6617224d03d03e012d01a2b",
"size": "2749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mplStyle/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "578438"
}
],
"symlink_target": ""
} |
"""Cover for Shelly."""
from __future__ import annotations
from typing import Any, cast
from aioshelly import Block
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_SHUTTER,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ShellyDeviceWrapper
from .const import COAP, DATA_CONFIG_ENTRY, DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up cover for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
blocks = [block for block in wrapper.device.blocks if block.type == "roller"]
if not blocks:
return
async_add_entities(ShellyCover(wrapper, block) for block in blocks)
class ShellyCover(ShellyBlockEntity, CoverEntity):
"""Switch that controls a cover block on Shelly devices."""
_attr_device_class = DEVICE_CLASS_SHUTTER
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize light."""
super().__init__(wrapper, block)
self.control_result: dict[str, Any] | None = None
self._supported_features: int = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self.wrapper.device.settings["rollers"][0]["positioning"]:
self._supported_features |= SUPPORT_SET_POSITION
@property
def is_closed(self) -> bool:
"""If cover is closed."""
if self.control_result:
return cast(bool, self.control_result["current_pos"] == 0)
return cast(bool, self.block.rollerPos == 0)
@property
def current_cover_position(self) -> int:
"""Position of the cover."""
if self.control_result:
return cast(int, self.control_result["current_pos"])
return cast(int, self.block.rollerPos)
@property
def is_closing(self) -> bool:
"""Return if the cover is closing."""
if self.control_result:
return cast(bool, self.control_result["state"] == "close")
return cast(bool, self.block.roller == "close")
@property
def is_opening(self) -> bool:
"""Return if the cover is opening."""
if self.control_result:
return cast(bool, self.control_result["state"] == "open")
return cast(bool, self.block.roller == "open")
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
async def async_close_cover(self, **kwargs: Any) -> None:
"""Close cover."""
self.control_result = await self.set_state(go="close")
self.async_write_ha_state()
async def async_open_cover(self, **kwargs: Any) -> None:
"""Open cover."""
self.control_result = await self.set_state(go="open")
self.async_write_ha_state()
async def async_set_cover_position(self, **kwargs: Any) -> None:
"""Move the cover to a specific position."""
self.control_result = await self.set_state(
go="to_pos", roller_pos=kwargs[ATTR_POSITION]
)
self.async_write_ha_state()
async def async_stop_cover(self, **_kwargs: Any) -> None:
"""Stop the cover."""
self.control_result = await self.set_state(go="stop")
self.async_write_ha_state()
@callback
def _update_callback(self) -> None:
"""When device updates, clear control result that overrides state."""
self.control_result = None
super()._update_callback()
| {
"content_hash": "25e70d5a9cd74c90d77a893049ce2fad",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 83,
"avg_line_length": 32.63247863247863,
"alnum_prop": 0.642744892613934,
"repo_name": "Danielhiversen/home-assistant",
"id": "73b8b1baae3585be476ebbaf8a01868ab8b71447",
"size": "3818",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/shelly/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import numpy
import math
execfile(os.path.join(os.path.dirname(__file__), 'rotations.py'))
addPlot(timeWindow=15, yLimits=[-1, 1])
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.l_foot_force_z)
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.r_foot_force_z)
addPlot(timeWindow=15, yLimits=[-1, 1])
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.l_foot_torque_x)
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.l_foot_torque_y)
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.r_foot_torque_x)
addSignal('EST_ROBOT_STATE', msg.utime, msg.force_torque.r_foot_torque_y)
| {
"content_hash": "40120e2d67e7bc7ead4c5e8c53550c9e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 36.411764705882355,
"alnum_prop": 0.7382875605815832,
"repo_name": "openhumanoids/oh-distro",
"id": "58d6c675721394f87ea83dec7f75d89424ab469b",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "software/config/signal_scope/val/forcetorque.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131738"
},
{
"name": "C++",
"bytes": "2773796"
},
{
"name": "CMake",
"bytes": "1099155"
},
{
"name": "GLSL",
"bytes": "5320"
},
{
"name": "Java",
"bytes": "233603"
},
{
"name": "JavaScript",
"bytes": "232"
},
{
"name": "M",
"bytes": "3971"
},
{
"name": "Makefile",
"bytes": "82095"
},
{
"name": "Matlab",
"bytes": "1946915"
},
{
"name": "Mercury",
"bytes": "1487"
},
{
"name": "Objective-C",
"bytes": "10657"
},
{
"name": "Pascal",
"bytes": "3353"
},
{
"name": "Perl",
"bytes": "18915"
},
{
"name": "Python",
"bytes": "378988"
},
{
"name": "Shell",
"bytes": "35631"
},
{
"name": "XSLT",
"bytes": "73426"
}
],
"symlink_target": ""
} |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def test_module(client) -> str:
result = client._http_request('GET', '/usage')
if result:
return 'ok'
else:
return 'Test failed: ' + str(result)
def run_command(client, data: Dict[str, str], endpoint: str, file=None, resp_type='json'):
response = client._http_request('POST', endpoint, data=data, files=file, resp_type=resp_type)
return response
def create_output(results: Dict[str, str], endpoint: str, keyfield=''):
output = CommandResults(
outputs_prefix=f'DeepL.{endpoint}',
outputs_key_field=keyfield,
outputs=results
)
return output
def main():
apikey = demisto.params().get('apikey')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/v2')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
headers = {'Authorization': f'DeepL-Auth-Key {apikey}'}
demisto.info(f'Command being called is {demisto.command()}')
try:
client = BaseClient(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'deepl-usage':
data: Dict[str, str] = {}
results = run_command(client, data, '/usage')
return_results(create_output(results, 'Usage'))
elif demisto.command() == 'deepl-translate-text':
results = run_command(client, args, '/translate')
return_results(create_output(results.get('translations'), 'TranslatedText'))
elif demisto.command() == 'deepl-submit-document':
data = args
filename = demisto.args().get('file')
data.pop('file')
file = demisto.getFilePath(filename)
with open(file['path'], 'rb') as open_file:
results = run_command(client, data, '/document', {'file': (file['name'], open_file)})
return_results(create_output(results, 'DocumentSubmission'))
elif demisto.command() == 'deepl-check-document-status':
data = {'document_key': args.get('document_key')}
document_id = args.get('document_id')
results = run_command(client, data, f'/document/{document_id}')
return_results(create_output(results, 'DocumentStatus', 'document_id'))
elif demisto.command() == 'deepl-get-document':
data = {'document_key': args.get('document_key')}
document_id = args.get('document_id')
filename = args.get('filename')
results = run_command(client, data, f'/document/{document_id}/result', resp_type='content')
return_results(fileResult(filename, results, file_type=EntryType.ENTRY_INFO_FILE))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| {
"content_hash": "9b3b2179505f84d59b6f7d78ca0d47d7",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 103,
"avg_line_length": 38.88235294117647,
"alnum_prop": 0.6033282904689864,
"repo_name": "demisto/content",
"id": "48b50d806492d4fceaed0b239087fa6cf6681d10",
"size": "3305",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/DeepL/Integrations/DeepL/DeepL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
"""
Sample from low-discrepancy sequences.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# local imports
from ._sobol import i4_sobol_generate
# global imports
import numpy as np
# exported symbols
__all__ = ['rstate', 'uniform', 'latin', 'sobol', 'grid']
def rstate(rng=None):
"""
Return a numpy RandomState object. If an integer value is given then a new
RandomState will be returned with this seed. If None is given then the
global numpy state will be returned. If an already instantiated state is
given this will be passed back.
"""
if rng is None:
return np.random.mtrand._rand
elif isinstance(rng, np.random.RandomState):
return rng
elif isinstance(rng, int):
return np.random.RandomState(rng)
raise ValueError('unknown seed given to rstate')
def uniform(bounds, n, rng=None):
"""
Sample n points uniformly at random from the specified region, given by
a list of [(lo,hi), ..] bounds in each dimension.
"""
# if given a seed or an instantiated RandomState make sure that we use
# it here, but also within the sample_spectrum code.
rng = rstate(rng)
bounds = np.array(bounds, ndmin=2, copy=False)
# generate the random values.
d = len(bounds)
w = bounds[:, 1] - bounds[:, 0]
X = bounds[:, 0] + w * rng.rand(n, d)
return X
def latin(bounds, n, rng=None):
"""
Sample n points from a latin hypercube within the specified region, given
by a list of [(lo,hi), ..] bounds in each dimension.
"""
rng = rstate(rng)
bounds = np.array(bounds, ndmin=2, copy=False)
# generate the random samples.
d = len(bounds)
w = bounds[:, 1] - bounds[:, 0]
X = bounds[:, 0] + w * (np.arange(n)[:, None] + rng.rand(n, d)) / n
# shuffle each dimension.
for i in xrange(d):
X[:, i] = rng.permutation(X[:, i])
return X
def sobol(bounds, n, rng=None):
"""
Sample n points from a sobol sequence within the specified region, given by
a list of [(lo,hi), ..] bounds in each dimension.
"""
rng = rstate(rng)
bounds = np.array(bounds, ndmin=2, copy=False)
# generate the random samples.
d = len(bounds)
skip = rng.randint(100, 200)
w = bounds[:, 1] - bounds[:, 0]
X = bounds[:, 0] + w * i4_sobol_generate(d, n, skip).T
return X
def grid(bounds, n):
"""
Generate a regular grid within the specified region, given by `bounds`,
a list of [(lo,hi), ..] bounds in each dimension. `n` represents the number
of points along each dimension.
"""
bounds = np.array(bounds, ndmin=2, copy=False)
d = len(bounds)
if d == 1:
X = np.linspace(bounds[0, 0], bounds[0, 1], n)
X = np.reshape(X, (-1, 1))
else:
X = np.meshgrid(*(np.linspace(a, b, n) for a, b in bounds))
X = np.reshape(X, (d, -1)).T
return X
| {
"content_hash": "00be3048bf3180dbf942ef14e1313cdc",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 27.69158878504673,
"alnum_prop": 0.6176172797840027,
"repo_name": "mwhoffman/mwhutils",
"id": "c445b3e85a35080ab23d16273c4b420bbf778873",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mwhutils/random/random.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "249048"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views import generic
from django.core.urlresolvers import reverse
from itertools import chain
from operator import attrgetter
from datetime import datetime
from main.forms import UserEventCreate
from main.models import Event, UserEvent, Organization
def home(request):
return render(request, 'main/home.html')
def list_events_one(request):
return list_events(request, 1)
def list_events(request, page):
filter_dict, filters = {}, {}
if request.GET.get('range'):
if not request.user.is_anonymous():
if request.user.user_profile.geo_lat:
dist = request.GET.get('range')
set = Event.objects.within(request.user.user_profile, float(dist))
set = set.filter(date_start__gte=timezone.now()).order_by('date_start')
if float(dist) == 1.0:
mi = ' mile'
else:
mi = ' miles'
filters['Search radius: ' + str(dist) + mi] = 'range=' + dist
else:
messages.error(request, "You don't have a location set! <a href='/profile/change_loc?next=" + reverse('main:list_events') + "'>Set one now</a>",
extra_tags='safe')
set = Event.objects.filter(date_start__gte=timezone.now()).order_by('date_start')
else:
set = Event.objects.filter(date_start__gte=timezone.now()).order_by('date_start')
else:
set = Event.objects.filter(date_start__gte=timezone.now()).order_by('date_start')
for k in request.GET:
if k == 'range':
pass
else:
v = request.GET.get(k)
if 'organization_id' in k:
filters["Organization: " + str(Organization.objects.get(pk=v).name)] = k + '=' + v
elif 'organization__name' in k:
filters["Organization contains: " + v] = k + '=' + v
elif 'name' in k:
filters["Name contains: " + v] = k + '=' + v
elif 'date' in k:
raw_date = v.split('/')
try:
v = datetime(int(raw_date[2]), int(raw_date[0]), int(raw_date[1]))
if k == 'date_start__gte':
filters["Date after: " + v] = k + '=' + v
elif k == 'date_start__lte':
filters["Date before: " + v] = k + '=' + v
except:
messages.error(request, 'Invalid date!')
continue
filter_dict[k] = v
set = set.filter(**filter_dict)
paginator = Paginator(set, 10, allow_empty_first_page=True)
try:
page_set = paginator.page(page)
except PageNotAnInteger:
page_set = paginator.page(1)
except EmptyPage:
messages.error(request, "That page was not found!")
return HttpResponseRedirect('/')
if not page_set.object_list.exists():
messages.error(request, "No events found!")
return render(request, 'main/list_events.html', {'events': page_set, 'filters': filters})
class EventView(generic.DetailView):
model = Event
template = 'main/event_detail.html'
def organization_detail(request, pk):
o = get_object_or_404(Organization.objects, pk=pk)
recent_events = list(o.events.filter(date_start__gte=timezone.now()).order_by('date_start')[:5])
return render(request, 'main/org_detail.html', {'organization': o, 'recent_events': recent_events})
@login_required
def userevent_detail(request, pk):
e = get_object_or_404(UserEvent.objects, pk=pk)
if request.user.id == e.user_id:
return render(request, 'main/userevent_detail.html', {'userevent': e})
messages.error(request, "That's not your event!")
return HttpResponseRedirect('/')
@login_required
def delete_userevent(request, pk):
event = UserEvent.objects.get(pk=pk)
if event:
if request.user.id == event.user_id:
event.delete()
messages.info(request, "Event successfully deleted")
else:
messages.error(request, "You aren't authorized to do that!")
else:
messages.error(request, "Event not found!")
if request.GET.get('next'):
return HttpResponseRedirect(request.GET.get('next'))
return HttpResponseRedirect('/')
@login_required
def track_events(request):
event = list(request.user.events.all())
user_event = list(request.user.user_events.all())
event_set = sorted(chain(event, user_event),
key=attrgetter('date_end'))
total_hours = 0
for i in event_set:
total_hours += i.hours()
if request.method == "POST":
form = UserEventCreate(user=request.user, data=request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Event created successfully')
return HttpResponseRedirect(reverse('main:track'))
else:
messages.error(request, 'Error creating event')
else:
form = UserEventCreate()
return render(request, 'main/track_events.html', {'events': event_set,
'total_hours': total_hours,
'form': form})
| {
"content_hash": "ee5187328ef3da7b4b8a248f652e7604",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 160,
"avg_line_length": 40.955882352941174,
"alnum_prop": 0.5870736086175943,
"repo_name": "mattr555/AtYourService",
"id": "d96edbc10f7b31cc3eac72bc60f6ec00f8d15dfb",
"size": "5570",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1398"
},
{
"name": "CoffeeScript",
"bytes": "5788"
},
{
"name": "JavaScript",
"bytes": "7754"
},
{
"name": "Python",
"bytes": "95284"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .common import InfoExtractor
class EbaumsWorldIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ebaumsworld\.com/videos/[^/]+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.ebaumsworld.com/videos/a-giant-python-opens-the-door/83367677/',
'info_dict': {
'id': '83367677',
'ext': 'mp4',
'title': 'A Giant Python Opens The Door',
'description': 'This is how nightmares start...',
'uploader': 'jihadpizza',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id)
video_url = config.find('file').text
return {
'id': video_id,
'title': config.find('title').text,
'url': video_url,
'description': config.find('description').text,
'thumbnail': config.find('image').text,
'uploader': config.find('username').text,
}
| {
"content_hash": "65bca6347810b6ea962bd8572d00266f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 91,
"avg_line_length": 32.90909090909091,
"alnum_prop": 0.5377532228360957,
"repo_name": "lodemo/CATANA",
"id": "c97682cd367edebfd9fc6a476ad073cb03240054",
"size": "1086",
"binary": false,
"copies": "87",
"ref": "refs/heads/master",
"path": "src/face_recognition/youtube_dl/extractor/ebaumsworld.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4068"
},
{
"name": "HTML",
"bytes": "755393"
},
{
"name": "JavaScript",
"bytes": "1451186"
},
{
"name": "Jupyter Notebook",
"bytes": "12442842"
},
{
"name": "MATLAB",
"bytes": "29584"
},
{
"name": "Python",
"bytes": "5006823"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
} |
import pygame
from pygame.locals import *
import button
from consts import *
class MessageBox(object):
def __init__(self, surface, MenuInfo):
self.surface = surface
self.colour = MenuInfo['colour1']
self.light_colour = MenuInfo['colour2']
self.caption = MenuInfo['caption']
self.desc = MenuInfo['desc']
self._subtext = None
self._subtextpos = None
self._bright_border = None
self._light_border = None
self._bg = None
self._text = None
self._textpos = None
self._create_menu()
def _create_menu(self):
self._bg = pygame.Surface((self.surface.get_width(), 110))
self._bg.fill((16, 16, 16))
self._bg = self._bg.convert()
self._bright_border = pygame.Surface((self.surface.get_width(), 10))
self._bright_border.fill(self.colour)
self._bright_border = self._bright_border.convert()
self._light_border = pygame.Surface((self._bright_border.get_size()))
self._light_border.fill(self.light_colour)
self._light_border = self._light_border.convert()
if pygame.font:
font = pygame.font.Font(DEF_FONT, 45)
self._text = font.render(self.caption, True, self.colour, PAUSE_BG)
self._text.set_colorkey(PAUSE_BG)
self._text = self._text.convert()
x = self.surface.get_width()/2
self._textpos = self._text.get_rect(centerx=x, y=120)
font = pygame.font.Font(DEF_FONT, 15)
self._subtext = font.render(self.desc, True, WHITE, PAUSE_BG)
self._subtext.set_colorkey(PAUSE_BG)
self._subtext = self._subtext.convert()
self._subtextpos = self._subtext.get_rect(centerx=x, y=175)
def show(self):
for x in range(3):
self.surface.blit(self._bg, (0,100))
self.surface.blit(self._bright_border, (0, 100))
self.surface.blit(self._light_border, (0,110))
self.surface.blit(self._light_border, (0, 210))
self.surface.blit(self._bright_border, (0, 220))
self.surface.blit(self._text, self._textpos)
self.surface.blit(self._subtext, self._subtextpos)
pygame.display.flip()
class ButtonMsg(MessageBox):
def __init__(self, surface, MenuInfo, B1_Info, B2_Info=None):
super(ButtonMsg,self).__init__(surface, MenuInfo)
self.b1_info = B1_Info
self.b2_info = B2_Info
self.b1_rect = B1_Info['rect']
if B2_Info:
self.b2_rect = B2_Info['rect']
self.b1 = None
self.b2 = None
self.initialize()
def initialize(self):
self.b1 = button.Button(self.surface, self.b1_info)
if self.b2_info:
self.b2 = button.Button(self.surface, self.b2_info)
def show(self):
super(ButtonMsg, self).show()
self.b1.show()
if self.b2:
self.b2.show()
def handle_event(self, event):
if event.type not in [MOUSEMOTION, MOUSEBUTTONDOWN, MOUSEBUTTONUP]:
return
elif self.b1_rect.collidepoint(event.pos):
if self.b2:
self.b2.handle_event()
self.b1.handle_event(event)
elif self.b2:
if self.b2_rect.collidepoint(event.pos):
self.b1.handle_event()
self.b2.handle_event(event)
else:
self.b1.handle_event()
if self.b2:
self.b2.handle_event()
| {
"content_hash": "5d717f914e42196de77975bf15686099",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 35.20754716981132,
"alnum_prop": 0.5385852090032154,
"repo_name": "FireBl0w/breakout-clone",
"id": "f6993afb66bb8116ab6e3dfc2351925f2799d686",
"size": "3732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35661"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import time
class ABF2(swhlab.ABF):
def phasicTonic(self,m1=None,m2=None,chunkMs=50,quietPercentile=10,
histResolution=.5,plotToo=False,rmsExpected=5):
"""
chunkMs should be ~50 ms or greater.
bin sizes must be equal to or multiples of the data resolution.
transients smaller than the expected RMS will be silenced.
"""
# prepare sectioning values to be used later
m1=0 if m1 is None else m1*self.pointsPerSec
m2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSec
m1,m2=int(m1),int(m2)
# prepare histogram values to be used later
padding=200 # pA or mV of maximum expected deviation
chunkPoints=int(chunkMs*self.pointsPerMs)
histBins=int((padding*2)/histResolution)
# center the data at 0 using peak histogram, not the mean
Y=self.sweepY[m1:m2]
hist,bins=np.histogram(Y,bins=2*padding)
Yoffset=bins[np.where(hist==max(hist))[0][0]]
Y=Y-Yoffset # we don't have to, but PDF math is easier
# calculate all histogram
nChunks=int(len(Y)/chunkPoints)
hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))
hist=hist/len(Y) # count as a fraction of total
Xs=bins[1:]
# get baseline data from chunks with smallest variance
chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))
variances=np.var(chunks,axis=1)
percentiles=np.empty(len(variances))
for i,variance in enumerate(variances):
percentiles[i]=sorted(variances).index(variance)/len(variances)*100
blData=chunks[np.where(percentiles<=quietPercentile)[0]].flatten()
# generate the standard curve and pull it to the histogram height
sigma=np.sqrt(np.var(blData))
center=np.average(blData)+histResolution/2
blCurve=mlab.normpdf(Xs,center,sigma)
blCurve=blCurve*max(hist)/max(blCurve)
# determine the phasic current by subtracting-out the baseline
diff=hist-blCurve
# manually zero-out data which we expect to be within the RMS range
ignrCenter=len(Xs)/2
ignrPad=rmsExpected/histResolution
ignr1,ignt2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)
diff[ignr1:ignt2]=0
return diff/len(Y)*abf.pointsPerSec # charge/sec
if __name__=="__main__":
#abfPath=r"X:\Data\2P01\2016\2016-09-01 PIR TGOT"
abfPath=r"C:\Users\scott\Documents\important\demodata"
abf=ABF2(os.path.join(abfPath,"16d14036.abf"))
t=time.perf_counter()
Xs=np.arange(abf.sweeps)*abf.sweepLength
pos,neg=np.zeros(len(Xs)),np.zeros(len(Xs))
for sweep in abf.setsweeps():
phasic=abf.phasicTonic(.75)
neg[sweep],pos[sweep]=np.sum(np.split(phasic,2),1)
t=time.perf_counter()-t
plt.figure(figsize=(10,5))
plt.grid()
plt.title("analysis of %s completed in %.02f S"%(abf.ID,t))
plt.plot(Xs,pos,'.',color='b',alpha=.3)
plt.plot(Xs,swhlab.common.lowpass(pos),'-',color='b',alpha=.5,lw=5,label="upward")
plt.plot(Xs,neg,'.',color='r',alpha=.3)
plt.plot(Xs,swhlab.common.lowpass(neg),'-',color='r',alpha=.5,lw=5,label="downward")
for sweep in abf.comment_times:
plt.axvline(sweep,lw=5,alpha=.5,color='g',ls='--')
plt.axhline(0,color='k',lw=3,alpha=.5)
plt.xlabel("time (secods)")
plt.ylabel("ms * pA / sec")
plt.legend(loc='upper left',shadow=True)
plt.margins(0,.1)
plt.show()
print("DONE") | {
"content_hash": "36befa587426c55256ed321b1ad27f70",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 39.208333333333336,
"alnum_prop": 0.6331030818278427,
"repo_name": "swharden/SWHLab",
"id": "4d16505039e1c4e10ab702fc1687767fc31fc1cf",
"size": "3764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/uses/EPSCs-and-IPSCs/variance method/2016-12-17 02 graphTime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "774"
},
{
"name": "HTML",
"bytes": "7797"
},
{
"name": "Jupyter Notebook",
"bytes": "54914"
},
{
"name": "Python",
"bytes": "149737"
}
],
"symlink_target": ""
} |
from actstream.actions import follow, unfollow
from actstream.models import Follow
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.db.models.signals import (
m2m_changed,
post_save,
pre_delete,
pre_save,
)
from django.dispatch import receiver
from guardian.utils import get_anonymous_user
from machina.apps.forum.models import Forum
from machina.apps.forum_conversation.models import Topic
from grandchallenge.algorithms.models import (
Algorithm,
AlgorithmPermissionRequest,
)
from grandchallenge.archives.models import Archive, ArchivePermissionRequest
from grandchallenge.cases.models import RawImageUploadSession
from grandchallenge.challenges.models import Challenge
from grandchallenge.core.utils import disable_for_loaddata
from grandchallenge.evaluation.models import Evaluation, Phase, Submission
from grandchallenge.notifications.models import Notification, NotificationType
from grandchallenge.participants.models import RegistrationRequest
from grandchallenge.reader_studies.models import (
ReaderStudy,
ReaderStudyPermissionRequest,
)
@receiver(post_save, sender=get_user_model())
@disable_for_loaddata
def add_user_to_groups(
instance: get_user_model = None, created: bool = False, *_, **__
):
if created:
g_reg_anon, _ = Group.objects.get_or_create(
name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME
)
instance.groups.add(g_reg_anon)
try:
anon_pk = get_anonymous_user().pk
except ObjectDoesNotExist:
# Used for the next if statement, as anon does not exist
# this user is not anonymous
anon_pk = None
if instance.pk != anon_pk:
g_reg, _ = Group.objects.get_or_create(
name=settings.REGISTERED_USERS_GROUP_NAME
)
instance.groups.add(g_reg)
@receiver(pre_save, sender=AlgorithmPermissionRequest)
@receiver(pre_save, sender=ArchivePermissionRequest)
@receiver(pre_save, sender=ReaderStudyPermissionRequest)
def process_permission_request_update(sender, instance, *_, **__):
try:
old_values = sender.objects.get(pk=instance.pk)
except ObjectDoesNotExist:
old_values = None
old_status = old_values.status if old_values else None
if instance.status != old_status:
if instance.status == instance.ACCEPTED:
instance.add_method(instance.user)
Notification.send(
type=NotificationType.NotificationTypeChoices.REQUEST_UPDATE,
message="was accepted",
target=instance,
)
elif instance.status == instance.REJECTED:
instance.remove_method(instance.user)
Notification.send(
type=NotificationType.NotificationTypeChoices.REQUEST_UPDATE,
message="was rejected",
target=instance,
)
@receiver(m2m_changed, sender=Group.user_set.through)
def update_editor_follows( # noqa: C901
instance, action, reverse, model, pk_set, **_
): # noqa: C901
if action not in ["post_add", "pre_remove", "pre_clear"]:
# nothing to do for the other actions
return
if reverse:
groups = [instance]
if pk_set is None:
users = instance.user_set.all()
else:
users = model.objects.filter(pk__in=pk_set).all()
else:
if pk_set is None:
groups = instance.groups.all()
else:
groups = model.objects.filter(pk__in=pk_set).all()
users = [instance]
follow_objects = []
for group in groups:
if hasattr(group, "editors_of_algorithm"):
follow_objects.append(group.editors_of_algorithm)
elif hasattr(group, "editors_of_archive"):
follow_objects.append(group.editors_of_archive)
elif hasattr(group, "editors_of_readerstudy"):
follow_objects.append(group.editors_of_readerstudy)
elif hasattr(group, "admins_of_challenge"):
# NOTE: only admins of a challenge should follow a challenge
# and its phases
follow_objects.append(group.admins_of_challenge)
for phase in group.admins_of_challenge.phase_set.all():
follow_objects.append(phase)
for user in users:
for obj in follow_objects:
if action == "post_add" and obj._meta.model_name != "algorithm":
follow(
user=user, obj=obj, actor_only=False, send_action=False,
)
# only new admins of a challenge get notified
if obj._meta.model_name == "challenge":
Notification.send(
type=NotificationType.NotificationTypeChoices.NEW_ADMIN,
message="added as admin for",
action_object=user,
target=obj,
)
elif action == "post_add" and obj._meta.model_name == "algorithm":
follow(
user=user,
obj=obj,
actor_only=False,
flag="access_request",
send_action=False,
)
elif action == "pre_remove" or action == "pre_clear":
unfollow(user=user, obj=obj, send_action=False)
@receiver(pre_delete, sender=get_user_model())
def clean_up_user_follows(instance, **_):
ct = ContentType.objects.filter(
app_label=instance._meta.app_label, model=instance._meta.model_name
).get()
Follow.objects.filter(
Q(object_id=instance.pk) | Q(user=instance.pk), content_type=ct
).delete()
Notification.objects.filter(
Q(actor_object_id=instance.pk) & Q(actor_content_type=ct)
| Q(action_object_object_id=instance.pk)
& Q(action_object_content_type=ct)
| Q(target_object_id=instance.pk) & Q(target_content_type=ct)
| Q(user_id=instance.pk)
).delete()
@receiver(pre_delete, sender=AlgorithmPermissionRequest)
@receiver(pre_delete, sender=ReaderStudyPermissionRequest)
@receiver(pre_delete, sender=ArchivePermissionRequest)
@receiver(pre_delete, sender=Archive)
@receiver(pre_delete, sender=Algorithm)
@receiver(pre_delete, sender=ReaderStudy)
@receiver(pre_delete, sender=Challenge)
@receiver(pre_delete, sender=Forum)
@receiver(pre_delete, sender=Topic)
@receiver(pre_delete, sender=RegistrationRequest)
@receiver(pre_delete, sender=Evaluation)
@receiver(pre_delete, sender=Phase)
@receiver(pre_delete, sender=Submission)
@receiver(pre_delete, sender=RawImageUploadSession)
def clean_up_notifications(instance, **_):
ct = ContentType.objects.filter(
app_label=instance._meta.app_label, model=instance._meta.model_name
).get()
Notification.objects.filter(
Q(actor_object_id=instance.pk) & Q(actor_content_type=ct)
| Q(action_object_object_id=instance.pk)
& Q(action_object_content_type=ct)
| Q(target_object_id=instance.pk) & Q(target_content_type=ct)
).delete()
| {
"content_hash": "b132695301a6a0507189880832d5158a",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 80,
"avg_line_length": 37.87564766839378,
"alnum_prop": 0.6493844049247606,
"repo_name": "comic/comic-django",
"id": "62139393168f2d911a4d8d7d4a5504b9fe64a809",
"size": "7310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/grandchallenge/core/signals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
"""
WSGI config for lakalici project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lakalici.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "80bab2191feab93553e51323e26ebe1d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.928571428571427,
"alnum_prop": 0.7749360613810742,
"repo_name": "yeleman/lakalici",
"id": "032e21141de741445cb0b68b5f774ccbb49a312f",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lakalici/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1853"
},
{
"name": "HTML",
"bytes": "29982"
},
{
"name": "Python",
"bytes": "67187"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.conf.urls import url, include
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
)
from django.forms.models import model_to_dict
class DedalGenericTemplatesMixin(object):
def get_template_names(self):
names = super(DedalGenericTemplatesMixin, self).get_template_names()
names.append('dedal/generic{}.html'.format(self.template_name_suffix))
return names
class Dedal(object):
def __init__(self, model, actions):
self.actions = actions
self.model = model
self.model_name = self.model._meta.model_name
def list(self):
class View(DedalGenericTemplatesMixin, ListView):
template_name_suffix = '_list'
model = self.model
def get_context_data(self, **kwargs):
context = super(View, self).get_context_data(**kwargs)
context['model'] = self.model
context['verbose_name'] = self.model._meta.verbose_name
return context
return View.as_view()
def read(self):
class View(DedalGenericTemplatesMixin, DetailView):
template_name_suffix = '_detail'
model = self.model
def get_context_data(self, *args, **kwargs):
context = super(View, self).get_context_data(*args, **kwargs)
context['fields'] = model_to_dict(self.object)
return context
return View.as_view()
def update(self):
model_name = self.model_name
class View(DedalGenericTemplatesMixin, UpdateView):
template_name_suffix = '_form'
model = self.model
# todo: black list
fields = self.model._meta.get_all_field_names()
def get_success_url(self):
return reverse(
'{}_read'.format(model_name), args=(self.object.pk,)
)
return View.as_view()
def create(self):
model_name = self.model_name
class View(DedalGenericTemplatesMixin, CreateView):
template_name_suffix = '_form'
model = self.model
# todo: black list
fields = self.model._meta.get_all_field_names()
def get_success_url(self):
return reverse(
'{}_read'.format(model_name), args=(self.object.pk,)
)
return View.as_view()
def delete(self):
model_name = self.model_name
class View(DedalGenericTemplatesMixin, DeleteView):
template_name_suffix = '_delete'
model = self.model
# todo: black list
fields = self.model._meta.get_all_field_names()
def get_success_url(self):
return reverse('{}_list'.format(model_name))
return View.as_view()
@property
def urls(self):
return [
url(
r'^$',
self.list(),
name='{}_list'.format(self.model_name)
),
url(
r'^create/$',
self.create(),
name='{}_create'.format(self.model_name)
),
url(
r'^(?P<pk>\d+)/$',
self.read(),
name='{}_read'.format(self.model_name)
),
url(
r'^(?P<pk>\d+)/update/$',
self.update(),
name='{}_update'.format(self.model_name)
),
url(
r'^(?P<pk>\d+)/delete/$',
self.delete(),
name='{}_delete'.format(self.model_name)
),
]
class DedalSite(object):
def __init__(self):
self._register = {}
def register(self, model, actions):
print('register', model, actions)
self._register[model] = Dedal(model, actions)
def get_urls(self):
urlpatterns = []
for model, dedal in self._register.items():
urlpatterns += [
url(r'^{}/'.format(model.__name__.lower()), include(dedal.urls))
]
return urlpatterns
@property
def urls(self):
return self.get_urls()
site = DedalSite()
| {
"content_hash": "92743e526a268f4cca128d38171c4874",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 80,
"avg_line_length": 29.717241379310344,
"alnum_prop": 0.5223949872360176,
"repo_name": "vi4m/django-dedal",
"id": "218630413d9521a61f93f57e8e5501ec9357a6d5",
"size": "4309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedal/site.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1409"
},
{
"name": "Makefile",
"bytes": "1233"
},
{
"name": "Python",
"bytes": "12988"
}
],
"symlink_target": ""
} |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.distributed.ClockDelta import globalClockDelta
import time
class TimeManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("TimeManagerAI")
def requestServerTime(self, context):
self.sendUpdateToAvatarId(self.air.getAvatarIdFromSender(),
'serverTime', [context,
globalClockDelta.getRealNetworkTime(bits=32),
int(time.time())])
def setDisconnectReason(self, reason):
avId = self.air.getAvatarIdFromSender()
self.air.writeServerEvent('disconnect-reason', avId=avId, reason=reason)
def setExceptionInfo(self, exception):
avId = self.air.getAvatarIdFromSender()
self.air.writeServerEvent('client-exception', avId=avId, exception=exception)
def setSignature(self, todo0, todo1, todo2):
pass
def setFrameRate(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6, todo7, todo8, todo9, todo10, todo11, todo12, todo13, todo14, todo15, todo16, todo17):
pass
def setCpuInfo(self, todo0, todo1):
pass
def checkForGarbageLeaks(self, todo0):
pass
def setNumAIGarbageLeaks(self, todo0):
pass
def setClientGarbageLeak(self, todo0, todo1):
pass
def checkAvOnDistrict(self, todo0, todo1):
pass
| {
"content_hash": "224203ba195b5a27dc21e15471d68443",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 161,
"avg_line_length": 36.30952380952381,
"alnum_prop": 0.6649180327868852,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "1d8d020199486ae403e71244f0f14e00bbc9eaa9",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "otp/ai/TimeManagerAI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
'''This module provides a class for Balances calls to the CC API'''
from currencycloud.http import Http
from currencycloud.resources import PaginatedCollection, Balance, MarginBalanceTopUp
class Balances(Http):
'''This class provides an interface to the Balances endpoints of the CC API'''
def for_currency(self, currency, **kwargs):
'''
Provides the balance for a currency and shows the date that the balance was last updated.
'''
return Balance(self, **self.get('/v2/balances/' + currency, query=kwargs))
def find(self, **kwargs):
'''
Search for a range of balances and receive a paged response. This is useful if you want to
see historic balances.
'''
response = self.get('/v2/balances/find', query=kwargs)
data = [Balance(self, **fields) for fields in response['balances']]
return PaginatedCollection(data, response['pagination'])
def top_up_margin(self, **kwargs):
'''
Provides the balance for a currency and shows the date that the balance was last updated.
'''
return MarginBalanceTopUp(self, **self.post('/v2/balances/top_up_margin', kwargs))
def first(self, **params):
params['per_page'] = 1
return self.find(**params)[0]
| {
"content_hash": "69ad3111e91e66b00366b18bc6d0e0df",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 98,
"avg_line_length": 39.24242424242424,
"alnum_prop": 0.6548262548262548,
"repo_name": "CurrencyCloud/currencycloud-python",
"id": "3c6721f94b95f6af44d07e7ac7a29cfc73daf12c",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/currencycloud/clients/balances.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178019"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "dispel4py",
version = "0.0.1",
author = "The University of Edinburgh",
author_email = "[email protected]",
description = ("Dispel4py is a Python library used to describe abstract workflows for distributed data-intensive applications."),
license = "Apache 2",
keywords = "dispel4py dispel workflows processing elements",
url = "https://github.com/akrause2014/dispel4py",
packages=['dispel4py', 'dispel4py.new', 'dispel4py.seismo', 'dispel4py.storm', 'dispel4py.examples', 'dispel4py.examples.graph_testing'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache 2 License",
],
)
| {
"content_hash": "2eab801a1612870dc452083a6446a87b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 141,
"avg_line_length": 41.18518518518518,
"alnum_prop": 0.6807553956834532,
"repo_name": "akrause2014/dispel4py",
"id": "b66a2bb0b470548bbb49e41c73ffc9593180ce49",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2036"
},
{
"name": "Python",
"bytes": "295425"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.views import LoginView
from django.urls import include, path, re_path
from . import views
from .models import NonAsciiRepr
urlpatterns = [
re_path(
r"^resolving1/(.+)/(.+)/$", views.resolving_view, name="positional-resolving"
),
re_path(r"^resolving2/(?P<arg1>.+)/(?P<arg2>.+)/$", views.resolving_view),
re_path(r"^resolving3/(.+)/$", views.resolving_view, {"arg2": "default"}),
re_path(r"^regular/(?P<title>.*)/$", views.regular_view),
re_path(r"^template_response/(?P<title>.*)/$", views.template_response_view),
re_path(r"^regular_jinja/(?P<title>.*)/$", views.regular_jinjia_view),
path("non_ascii_request/", views.regular_view, {"title": NonAsciiRepr()}),
path("new_user/", views.new_user),
path("execute_sql/", views.execute_sql),
path("cached_view/", views.cached_view),
path("cached_low_level_view/", views.cached_low_level_view),
path("json_view/", views.json_view),
path("redirect/", views.redirect_view),
path("login_without_redirect/", LoginView.as_view(redirect_field_name=None)),
path("admin/", admin.site.urls),
path("__debug__/", include("debug_toolbar.urls")),
]
| {
"content_hash": "4c438ed8fd6a1dded2cc9f6badda34bd",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 85,
"avg_line_length": 45.03703703703704,
"alnum_prop": 0.6513157894736842,
"repo_name": "spookylukey/django-debug-toolbar",
"id": "c12fc744afdcbd21408ed4dd18dbda91841fd5da",
"size": "1216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11912"
},
{
"name": "HTML",
"bytes": "30884"
},
{
"name": "JavaScript",
"bytes": "23780"
},
{
"name": "Jinja",
"bytes": "88"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "247769"
}
],
"symlink_target": ""
} |
from whacked4 import config, utils
from whacked4.ui import editormixin, windows
import wx
class SoundsFrame(editormixin.EditorMixin, windows.SoundsFrameBase):
"""
Sounds editor window.
"""
# The colour used for color-coding priorities.
PRIORITY_COLOUR = wx.Colour(red=255, green=48, blue=0)
UNUSED_TEXT_COLOUR = wx.Colour(red=127, green=127, blue=127)
UNUSED_BACKGROUND_COLOUR = wx.Colour(red=243, green=243, blue=243)
def __init__(self, parent):
windows.SoundsFrameBase.__init__(self, parent)
editormixin.EditorMixin.__init__(self)
# A list of all tool windows for simple mass operations.
self.WINDOWS_TOOLS = [
self.Priority,
self.PrioritySpinner,
self.Singular,
self.Restore
]
self.SetIcon(wx.Icon('res/editor-sounds.ico'))
self.priority_colours = []
self.build_colours()
self.patch = None
self.pwads = None
self.selected_index = -1
self.selected_row = -1
def activate(self, event):
"""
Called when this editor window is activated by the user.
"""
# Call the editor mixin function that we are overriding.
editormixin.EditorMixin.activate(self, event)
if not self:
return
# Update sound names only.
self.SoundList.SetItem(0, 1, self.patch.get_sound_name(0))
for index, sound in enumerate(self.patch.sounds):
self.SoundList.SetItem(index + 1, 1, self.patch.get_sound_name(index + 1))
def build_colours(self):
"""
Builds priority colour coding colours and blends them with the system's window background color.
"""
sys_col = self.SoundList.GetBackgroundColour()
for index in range(4):
factor = 0.06 * index
sys_factor = 1 - factor
colour = wx.Colour(
int(self.PRIORITY_COLOUR.Red() * factor + sys_col.Red() * sys_factor),
int(self.PRIORITY_COLOUR.Green() * factor + sys_col.Green() * sys_factor),
int(self.PRIORITY_COLOUR.Blue() * factor + sys_col.Blue() * sys_factor)
)
self.priority_colours.append(colour)
def build(self, patch):
"""
@see: EditorMixin.build
"""
self.patch = patch
self.pwads = self.GetMDIParent().pwads
self.selected_index = -1
self.selected_row = -1
self.soundlist_build()
def update(self):
"""
@see: EditorMixin.update
"""
self.pwads = self.GetMDIParent().pwads
def soundlist_build(self):
"""
Builds the contents of the sounds list from scratch.
"""
self.SoundList.ClearAll()
# Add column headers if necessary.
if self.SoundList.GetColumnCount() == 0:
self.SoundList.InsertColumn(0, 'Index', width=41)
self.SoundList.InsertColumn(1, 'Name', width=54)
self.SoundList.InsertColumn(2, 'Priority', width=50)
self.SoundList.InsertColumn(3, 'Singular', width=58)
# Add dummy sound.
self.SoundList.InsertItem(0, '0')
self.SoundList.SetItemFont(0, config.FONT_MONOSPACED)
self.soundlist_update_row(0, 0)
# Add real sounds.
for sound_index in range(len(self.patch.sounds)):
self.SoundList.InsertItem(sound_index + 1, str(sound_index + 1))
self.SoundList.SetItemFont(sound_index + 1, config.FONT_MONOSPACED)
self.soundlist_update_row(sound_index + 1, sound_index)
self.list_autosize(self.SoundList)
self.SoundList.Select(0, True)
def soundlist_update_row(self, row_index, sound_index):
"""
Updates a sound list row with the data for that sound.
"""
sound = self.patch.sounds[sound_index]
if row_index == 0 or sound.unused:
self.SoundList.SetItem(row_index, 1, self.patch.get_sound_name(0))
self.SoundList.SetItem(row_index, 2, '')
self.SoundList.SetItem(row_index, 3, '')
self.SoundList.SetItemTextColour(row_index, self.UNUSED_TEXT_COLOUR)
self.SoundList.SetItemBackgroundColour(row_index, self.UNUSED_BACKGROUND_COLOUR)
else:
if sound['isSingular'] == 1:
singular = '◾'
else:
singular = ''
self.SoundList.SetItem(row_index, 1, self.patch.get_sound_name(row_index))
self.SoundList.SetItem(row_index, 2, str(sound['priority']))
self.SoundList.SetItem(row_index, 3, singular)
# Colour-code rows by priority.
color_index = int(sound['priority'] / 32)
if color_index >= len(self.priority_colours):
color_index = len(self.priority_colours) - 1
self.SoundList.SetItemBackgroundColour(row_index, self.priority_colours[color_index])
def sound_select_index(self, row_index, sound_index):
"""
Selects a sound by sound index.
"""
self.selected_index = sound_index
self.selected_row = row_index
self.update_properties()
def sound_restore(self, event):
"""
Restores the currently selected sound to it's engine state.
"""
self.undo_add()
self.patch.sounds[self.selected_index] = self.patch.engine.sounds[self.selected_index].clone()
self.soundlist_update_row(self.selected_row, self.selected_index)
self.update_properties()
self.is_modified(True)
def sound_play(self, event):
"""
Plays the currently selected sound.
"""
if self.selected_row == 0:
return
utils.sound_play(self.patch.sounds[self.selected_index].name, self.pwads)
def update_properties(self):
"""
Update the displayed property controls.
"""
if not self.patch:
return
sound = self.patch.sounds[self.selected_index]
if self.selected_row == 0 or sound.unused:
self.Priority.ChangeValue('')
self.PrioritySpinner.SetValue(0)
self.Singular.SetValue(False)
self.tools_set_state(False)
else:
singular = (sound['isSingular'] == 1)
self.Priority.ChangeValue(str(sound['priority']))
self.PrioritySpinner.SetValue(sound['priority'])
self.Singular.SetValue(singular)
self.tools_set_state(True)
def tools_set_state(self, enabled):
"""
Sets the state of all tool controls.
"""
# Override editing controls if sound support is disabled.
if 'nosupport.sounds' in self.patch.engine.features:
enabled = False
for window in self.WINDOWS_TOOLS:
window.Enable(enabled)
def set_singular(self, event):
"""
Sets the singularity flag for the currently selected sound.
"""
self.undo_add()
value = self.Singular.GetValue()
sound = self.patch.sounds[self.selected_index]
if value:
sound['isSingular'] = 1
else:
sound['isSingular'] = 0
self.soundlist_update_row(self.selected_row, self.selected_index)
self.is_modified(True)
def set_priority(self, event):
"""
Validates and sets a property of the current sound.
"""
self.undo_add()
window = self.FindWindowById(windows.SOUNDS_PRIORITY)
value = utils.validate_numeric(window)
# Clamp sprite to valid range.
if value < 0:
value = 0
elif value >= 0x7FFFFFFF:
value = 0x7FFFFFFF
if window.GetValue() != value:
window.ChangeValue(str(value))
sound = self.patch.sounds[self.selected_index]
sound['priority'] = value
self.soundlist_update_row(self.selected_row, self.selected_index)
self.is_modified(True)
def goto_sound_index(self, sound_index):
"""
Selects a sound from the list.
"""
self.SoundList.Select(sound_index, True)
self.SoundList.EnsureVisible(sound_index)
self.SoundList.SetFocus()
def undo_restore_item(self, item):
"""
@see: EditorMixin.undo_restore_item
"""
self.patch.sounds[item['index']] = item['item']
self.soundlist_update_row(item['index'] + 1, item['index'])
self.update_properties()
self.is_modified(True)
def undo_store_item(self):
"""
@see: EditorMixin.undo_store_item
"""
return {
'item': self.patch.sounds[self.selected_index].clone(),
'index': self.selected_index
}
def sound_select(self, event):
"""
Called when a sound row is selected from the list.
"""
self.sound_select_index(event.GetIndex(), event.GetIndex() - 1)
def priority_spin_up(self, event):
priority = int(self.Priority.GetValue())
self.Priority.SetValue(str(priority + 1))
def priority_spin_down(self, event):
priority = int(self.Priority.GetValue())
self.Priority.SetValue(str(priority - 1))
| {
"content_hash": "eea4ae740773a6164cf405d5de9f72c8",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 104,
"avg_line_length": 30.155844155844157,
"alnum_prop": 0.5871016365202412,
"repo_name": "GitExl/WhackEd4",
"id": "74234799d76eb79f2d9f49dd6d0d0bf74738f3f2",
"size": "9326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/whacked4/ui/editors/soundsframe.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1552"
},
{
"name": "CSS",
"bytes": "2026"
},
{
"name": "HTML",
"bytes": "17664"
},
{
"name": "Inno Setup",
"bytes": "2820"
},
{
"name": "Python",
"bytes": "485400"
}
],
"symlink_target": ""
} |
"""
felix.actor
~~~~~~~~~~~
A queue-based Actor framework that supports efficient handling of
batches of messages. Each Actor instance has its own greenlet
and a queue of pending messages. Messages are sent by making calls
to methods decorated by the @actor_message decorator.
When an actor_message-decorated method is called from another greenlet
the method call is wrapped up as a Message object and put on the
queue.
Note: callers must specify the async=True/False argument when calling
a actor_message-decorated method. If async=True is passed, the method
returns an AsyncResult. If async=False is passed, the method blocks
until the result is available and returns it as-is. As a convenience,
Actors may call their own decorated methods without passing async=...;
such calls are treated as normal, synchronous method calls.
Each time it is scheduled, the main loop of the Actor
* pulls all pending messages off the queue as a batch
* notifies the subclass that a batch is about to start via
_start_msg_batch()
* executes each of the actor_message method calls from the batch in order
* notifies the subclass that the batch is finished by calling
_finish_msg_batch()
* publishes the results from the batch via AsyncResults, allowing
callers to check for exceptions or receive a result.
Simple actors
~~~~~~~~~~~~~
A simple Actor may ignore the start/finish_msg_batch calls and do
all its work in the actor_message-decorated methods, ensuring that
all its invariants are restored by the end of each call.
Supporting batches
~~~~~~~~~~~~~~~~~~
For an actor that can handle a batch more efficiently, it may
initialize some per-batch state in the start_msg_batch function,
update the state from its actor_message methods and then "commit"
the state in _finish_msg_batch().
Since moving the commit stage to _finish_msg_batch() can make
it hard to report errors to the correct AsyncResult, the framework
supports the ability to split a batch of work and retry it from
the beginning. To make use of that function, an Actor must:
* take part in batching
* have actor_message methods that only affect the per-batch state
(i.e. it must defer its side effects to the _finish_msg_batch()
method)
* raise SplitBatchAndRetry from its _finish_msg_batch() method,
ensuring, of course, that it did not leave any resources
partially-modified.
Thread safety
~~~~~~~~~~~~~
While the framework makes it easy to avoid shared state, there are
some gotchas:
* Using the async=False feature blocks the current greenlet until the
one it is calling into returns a result. This can also cause deadlock
if there are call cycles.
* We deliberately use unbounded queues for queueing up messages between
actors. Bounding the queues would allow deadlock since the sending actor
can block on a full queue and the receiving actor may be blocked on the
queue of the sender, trying to send another message.
Unhandled Exceptions
~~~~~~~~~~~~~~~~~~~~
The framework keeps track of pending AsyncResults and tries to detect
callbacks that were GCed with a pending exception. If is detects such
an exception, it terminates the process on the assumption that
an unhandled exception implies a bug and may leave the system in an
inconsistent state.
"""
import collections
import functools
from calico.monotonic import monotonic_time
import gevent
import gevent.local
import logging
import os
import random
import sys
import traceback
import weakref
from gevent.event import AsyncResult
from calico.felix import futils
from calico.felix.futils import StatCounter
_log = logging.getLogger(__name__)
# Minimum gevent scheduling delay. A delay of 0 should mean "yield" but
# gevent has a known issue that a greenlet that sleeps for 0 may be rescheduled
# immediately. Any small positive value is enough to truly yield.
MIN_DELAY = 0.000001
ResultOrExc = collections.namedtuple("ResultOrExc", ("result", "exception"))
# Local storage to allow diagnostics.
actor_storage = gevent.local.local()
# Global diagnostic counters.
_stats = StatCounter("Actor framework counters")
class Actor(object):
"""
Class that contains a queue and a greenlet serving that queue.
"""
max_ops_before_yield = 1000
"""Number of calls to self._maybe_yield before it yields"""
batch_delay = 0.01
"""
Minimum delay between schedules of this Actor. Larger values encourage
more batching of messages and reduce starvation (but introduce more
latency when we're under load).
"""
def __init__(self, qualifier=None):
self._event_queue = collections.deque()
# Set to True when the main loop is actively processing the input
# queue or has been scheduled to do so. Set to False when the loop
# runs out of work and switches to the Hub to wait for more.
self._scheduled = True
# (Monotonic time) timestamp of last schedule.
self._last_scheduled = None
# Cache the gevent Hub and main loop.
self._gevent_hub = gevent.get_hub()
self._gevent_loop = self._gevent_hub.loop
self.greenlet = gevent.Greenlet(self._loop)
self._op_count = 0
self._current_msg = None
self.started = False
# Message being processed; purely for logging.
self.msg_id = None
# Logging parameters
self.qualifier = qualifier
if qualifier:
self.name = "%s(%s)" % (self.__class__.__name__, qualifier)
else:
self.name = self.__class__.__name__
# Can't use str(self) yet, it might not be ready until subclass
# constructed.
_log.info("%s created.", self.name)
def maybe_schedule(self, caller):
"""
Schedule this Actor to run iff it is not already running/scheduled.
:param str caller: A (debug) tag to pass to the greenlet to identify
the caller
"""
# This method uses very low-level gevent APIs for performance, it is
# partially cribbed from gevent's Waiter class, which is a simple
# application of the low-level APIs.
_log.debug("Checking whether we need to schedule %s", self)
# We use the lack of yield points in this code to ensure that no-one
# changes self._scheduled between the next two lines. Do not add
# logging between the test and set of self._scheduled.
if not self._scheduled:
self._scheduled = True
# Calculate the scheduling delay. If this Actor hasn't been
# scheduled for a long time, we'll schedule it straight away,
# otherwise we back off to the batch_delay to encourage work to be
# batched up when we're under load.
now = monotonic_time()
if self._last_scheduled is not None:
time_since_last_schedule = now - self._last_scheduled
delay = max(self.batch_delay - time_since_last_schedule,
MIN_DELAY)
else:
delay = MIN_DELAY
# We can't switch directly to the Actor's greenlet because that
# prevents gevent from doing its scheduling. Instead, we ask the
# gevent event loop to switch to the greenlet.
t = self._gevent_loop.timer(delay)
t.start(self._switch, caller)
self._last_scheduled = now
_log.debug("Scheduled %s", self)
def _switch(self, value):
"""
Switch to this Actor's greenlet, handling errors via the Hub's
handle_error method.
This should only be called from the gevent Hub.
"""
# This method uses very low-level gevent APIs for performance, it is
# partially cribbed from gevent's Waiter class, which is a simple
# application of the low-level APIs.
# WARNING: this method is called from the gevent Hub, it cannot use
# logging because logging can do IO, which is illegal from the Hub.
switch = self.greenlet.switch
try:
self.greenlet.switch(value)
except:
self._gevent_hub.handle_error(switch, *sys.exc_info())
def start(self):
assert not self.greenlet, "Already running"
_log.info("Starting %s", self)
self.started = True
self.greenlet.start()
return self
def _loop(self):
"""
Main greenlet loop, repeatedly runs _step(). Doesn't return normally.
"""
actor_storage.class_name = self.__class__.__name__
actor_storage.name = self.name
actor_storage.msg_id = None
try:
while True:
self._step()
except:
_log.exception("Exception killed %s", self)
raise
def _step(self):
"""
Run one iteration of the event loop for this actor. Mainly
broken out to allow the UTs to single-step an Actor.
It also has the beneficial side effect of introducing a new local
scope so that our variables die before we block next time.
"""
hub = gevent.get_hub()
while not self._event_queue:
# We've run out of work to process, note that fact and then switch
# to the Hub to allow something else to run. actor_message will
# wake us up when there's more work to do.
self._scheduled = False
caller = hub.switch()
# Before actor_message switches to us, it should set _scheduled
# back to True.
assert self._scheduled, ("Switched to %s from %s but _scheduled "
"set to False." % (self, caller))
msg = self._event_queue.popleft()
batch = [msg]
batches = []
if not msg.needs_own_batch:
# Try to pull some more work off the queue to combine into a
# batch.
while self._event_queue:
# We're the only ones getting from the queue so this should
# never fail.
msg = self._event_queue.popleft()
if msg.needs_own_batch:
if batch:
batches.append(batch)
batches.append([msg])
batch = []
else:
batch.append(msg)
if batch:
batches.append(batch)
num_splits = 0
while batches:
# Process the first batch on our queue of batches. Invariant:
# we'll either process this batch to completion and discard it or
# we'll put all the messages back into the batch queue in the same
# order but with a first batch that is half the size and the
# rest of its messages in the second batch.
batch = batches.pop(0)
# Give subclass a chance to filter the batch/update its state.
batch = self._start_msg_batch(batch)
assert batch is not None, "_start_msg_batch() should return batch."
results = [] # Will end up same length as batch.
for msg in batch:
_log.debug("Message %s recd by %s from %s, queue length %d",
msg, msg.recipient, msg.caller,
len(self._event_queue))
self._current_msg = msg
actor_storage.msg_id = msg.msg_id
actor_storage.msg_name = msg.name
try:
# Actually execute the per-message method and record its
# result.
result = msg.method()
except BaseException as e:
_log.exception("Exception processing %s", msg)
results.append(ResultOrExc(None, e))
_stats.increment("Messages executed with exception")
else:
results.append(ResultOrExc(result, None))
_stats.increment("Messages executed OK")
finally:
self._current_msg = None
actor_storage.msg_id = None
actor_storage.msg_name = None
try:
# Give subclass a chance to post-process the batch.
_log.debug("Finishing message batch of length %s", len(batch))
actor_storage.msg_name = "<finish batch>"
self._finish_msg_batch(batch, results)
except SplitBatchAndRetry:
# The subclass couldn't process the batch as is (probably
# because a failure occurred and it couldn't figure out which
# message caused the problem). Split the batch into two and
# re-run it.
_log.warn("Splitting batch to retry.")
self.__split_batch(batch, batches)
num_splits += 1 # For diags.
_stats.increment("Split batches")
continue
except BaseException as e:
# Most-likely a bug. Report failure to all callers.
_log.exception("_finish_msg_batch failed.")
results = [(None, e)] * len(results)
_stats.increment("_finish_msg_batch() exception")
else:
_log.debug("Finished message batch successfully")
finally:
actor_storage.msg_name = None
# Batch complete and finalized, set all the results.
assert len(batch) == len(results)
for msg, (result, exc) in zip(batch, results):
for future in msg.results:
if exc is not None:
future.set_exception(exc)
else:
future.set(result)
_stats.increment("Messages completed")
_stats.increment("Batches processed")
if num_splits > 0:
_log.warn("Split batches complete. Number of splits: %s",
num_splits)
@staticmethod
def __split_batch(current_batch, remaining_batches):
"""
Splits batch in half and prepends it to the list of remaining
batches. Modifies remaining_batches in-place.
:param list[Message] current_batch: list of messages that's currently
being processed.
:param list[list[Message]] remaining_batches: list of batches
still to process.
"""
assert len(current_batch) > 1, "Batch too small to split"
# Split the batch.
split_point = len(current_batch) // 2
_log.debug("Split-point = %s", split_point)
first_half = current_batch[:split_point]
second_half = current_batch[split_point:]
if remaining_batches and not remaining_batches[0][0].needs_own_batch:
# Optimization: there's another batch already queued and
# it also contains batchable messages push the second
# half of this batch onto the front of that one.
_log.debug("Split batch and found a subsequent batch, "
"coalescing with that.")
next_batch = remaining_batches[0]
next_batch[:0] = second_half
else:
_log.debug("Split batch but cannot prepend to next batch, adding "
"both splits to start of queue.")
remaining_batches[:0] = [second_half]
remaining_batches[:0] = [first_half]
def _start_msg_batch(self, batch):
"""
Called before processing a batch of messages to give subclasses
a chance to filter the batch. Implementations must ensure that
every AsyncResult in the batch is correctly set. Usually, that
means combining them into one list.
It is usually easier to build up a batch of changes to make in the
@actor_message-decorated methods and then process them in
_finish_msg_batch().
Intended to be overridden. This implementation simply returns the
input batch.
:param list[Message] batch:
"""
return batch
def _finish_msg_batch(self, batch, results):
"""
Called after a batch of events have been processed from the queue
before results are set.
Intended to be overridden. This implementation does nothing.
Exceptions raised by this method are propagated to all messages in the
batch, overriding the existing results. It is recommended that the
implementation catches appropriate exceptions and maps them back
to the correct entry in results.
:param list[ResultOrExc] results: Pairs of (result, exception)
representing the result of each message-processing function.
Only one of the values is set. Updates to the list alter the
result send to any waiting listeners.
:param list[Message] batch: The input batch, always the same length as
results.
"""
pass
def _maybe_yield(self):
"""
With some probability, yields processing to another greenlet.
(Utility method to be called from the actor's greenlet during
long-running operations.)
"""
self._op_count += 1
if self._op_count >= self.max_ops_before_yield:
gevent.sleep(MIN_DELAY)
self._op_count = 0
def __str__(self):
return self.__class__.__name__ + "<%s,queue_len=%s,live=%s,msg=%s>" % (
self.qualifier,
len(self._event_queue),
bool(self.greenlet),
self._current_msg
)
class SplitBatchAndRetry(Exception):
"""
Exception that may be raised by _finish_msg_batch() to cause the
batch of messages to be split, each message to be re-executed and
then the smaller batches delivered to _finish_msg_batch() again.
"""
pass
def wait_and_check(async_results):
for r in async_results:
r.get()
# Start with a random offset to make the log easier to grep.
next_message_id = random.randint(0, sys.maxint)
class Message(object):
"""
Message passed to an actor.
"""
__slots__ = ("msg_id", "method", "results", "caller", "name",
"needs_own_batch", "recipient")
def __init__(self, msg_id, method, results, caller_path, recipient,
needs_own_batch):
self.msg_id = msg_id
self.method = method
self.results = results
self.caller = caller_path
self.name = method.func.__name__
self.needs_own_batch = needs_own_batch
self.recipient = recipient
_stats.increment("Messages created")
def __str__(self):
data = ("%s (%s)" % (self.msg_id, self.name))
return data
def actor_message(needs_own_batch=False):
"""
Decorator: turns a method into an Actor message.
Calls to the wrapped method will be queued via the Actor's message queue.
The caller to a wrapped method must specify the async=True/False
argument to specify whether they want their own thread to block
waiting for the result.
If async=True is passed, the wrapped method returns an AsyncResult.
Otherwise, it blocks and returns the result (or raises the exception)
as-is.
Using async=False to block the current thread can be very convenient but
it can also deadlock if there is a cycle of blocking calls. Use with
caution.
:param bool needs_own_batch: True if this message should be processed
in its own batch.
"""
def decorator(fn):
method_name = fn.__name__
@functools.wraps(fn)
def queue_fn(self, *args, **kwargs):
# Calculating the calling information is expensive, so only do it
# if debug is enabled.
caller = "<disabled>"
caller_name = "<disabled>"
calling_path = "<disabled>"
if _log.isEnabledFor(logging.DEBUG):
# Get call information for logging purposes.
calling_file, line_no, func, _ = traceback.extract_stack()[-2]
calling_file = os.path.basename(calling_file)
calling_path = "%s:%s:%s" % (calling_file, line_no, func)
try:
caller_name = "%s.%s" % (actor_storage.class_name,
actor_storage.msg_name)
caller = "%s (processing %s)" % (actor_storage.name,
actor_storage.msg_id)
except AttributeError:
caller_name = calling_path
caller = calling_path
# Figure out our arguments.
async_set = "async" in kwargs
async = kwargs.pop("async", False)
on_same_greenlet = (self.greenlet == gevent.getcurrent())
if on_same_greenlet and not async:
# Bypass the queue if we're already on the same greenlet, or we
# would deadlock by waiting for ourselves.
return fn(self, *args, **kwargs)
else:
# Only log a stat if we're not simulating a normal method call.
# WARNING: only use stable values in the stat name.
# For example, Actor.name can be different for every actor,
# resulting in leak if we use that.
_stats.increment(
"%s message %s --[%s]-> %s" %
("ASYNC" if async else "BLOCKING",
caller_name,
method_name,
self.__class__.__name__)
)
# async must be specified, unless on the same actor.
assert async_set, "Cross-actor event calls must specify async arg."
# Allocate a message ID. We rely on there being no yield point
# here for thread safety.
global next_message_id
msg_id = "M%016x" % next_message_id
if next_message_id == sys.maxint:
next_message_id = 0
else:
next_message_id += 1
if not on_same_greenlet and not async:
_stats.increment("Blocking calls started")
_log.debug("BLOCKING CALL: [%s] %s -> %s", msg_id,
calling_path, method_name)
# OK, so build the message and put it on the queue.
partial = functools.partial(fn, self, *args, **kwargs)
result = TrackedAsyncResult((calling_path, caller,
self.name, method_name))
msg = Message(msg_id, partial, [result], caller, self.name,
needs_own_batch=needs_own_batch)
_log.debug("Message %s sent by %s to %s, queue length %d",
msg, caller, self.name, len(self._event_queue))
self._event_queue.append(msg)
self.maybe_schedule(caller)
if async:
return result
else:
blocking_result = None
try:
blocking_result = result.get()
except BaseException as e:
blocking_result = e
raise
finally:
_stats.increment("Blocking calls completed")
_log.debug("BLOCKING CALL COMPLETE: [%s] %s -> %s = %r",
msg_id, calling_path, method_name,
blocking_result)
return blocking_result
queue_fn.func = fn
return queue_fn
return decorator
# Each time we create a TrackedAsyncResult, me make a weak reference to it
# so that we can get a callback (_on_ref_reaped()) when the TrackedAsyncResult
# is GCed. This is roughly equivalent to adding a __del__ method to the
# TrackedAsyncResult but it doesn't interfere with the GC.
#
# In order for the callback to get called, we have to keep the weak reference
# alive until after the TrackedAsyncResult itself is GCed. To do that, we
# stash a reference to the weak ref in this dict and then clean it up in
# _on_ref_reaped().
_tracked_refs_by_idx = {}
_ref_idx = 0
def dump_actor_diags(log):
log.info("Current ref index: %s", _ref_idx)
log.info("Number of tracked messages outstanding: %s",
len(_tracked_refs_by_idx))
futils.register_diags("Actor framework", dump_actor_diags)
class ExceptionTrackingWeakRef(weakref.ref):
"""
Specialised weak reference with a slot to hold an exception
that was leaked.
"""
# Note: superclass implements __new__ so we have to mimic its args
# and have the callback passed in.
def __init__(self, obj, callback):
super(ExceptionTrackingWeakRef, self).__init__(obj, callback)
self.exception = None
self.tag = None
# Callback won't get triggered if we die before the object we reference
# so stash a reference to this object, which we clean up when the
# TrackedAsyncResult is GCed.
global _ref_idx
self.idx = _ref_idx
_ref_idx += 1
_tracked_refs_by_idx[self.idx] = self
def __str__(self):
return (self.__class__.__name__ + "<%s/%s,exc=%s>" %
(self.tag, self.idx, self.exception))
def _on_ref_reaped(ref):
"""
Called when a TrackedAsyncResult gets GCed.
Looks for leaked exceptions.
:param ExceptionTrackingWeakRef ref: The ref that may contain a leaked
exception.
"""
# Future maintainers: This function *must not* do any IO of any kind, or
# generally do anything that would cause gevent to yield the flow of
# control. See issue #587 for more details.
assert isinstance(ref, ExceptionTrackingWeakRef)
del _tracked_refs_by_idx[ref.idx]
if ref.exception:
try:
msg = ("TrackedAsyncResult %s was leaked with "
"exception %r. Dying." % (ref.tag, ref.exception))
_print_to_stderr(msg)
finally:
# Called from the GC so we can't raise an exception, just die.
_exit(1)
class TrackedAsyncResult(AsyncResult):
"""
An AsyncResult that tracks if any exceptions are leaked.
"""
def __init__(self, tag):
super(TrackedAsyncResult, self).__init__()
# Avoid keeping a reference to the weak ref directly; look it up
# when needed. Also, be careful not to attach any debugging
# information to the ref that could produce a reference cycle. The
# tag should be something simple like a string or tuple.
tr = ExceptionTrackingWeakRef(self, _on_ref_reaped)
tr.tag = tag
self.__ref_idx = tr.idx
@property
def __ref(self):
return _tracked_refs_by_idx[self.__ref_idx]
def set_exception(self, exception):
self.__ref.exception = exception
return super(TrackedAsyncResult, self).set_exception(exception)
def get(self, block=True, timeout=None):
try:
result = super(TrackedAsyncResult, self).get(block=block,
timeout=timeout)
finally:
# Someone called get so any exception can't be leaked. Discard it.
self.__ref.exception = None
return result
# Factored out for UTs to stub.
def _print_to_stderr(msg):
print >> sys.stderr, msg
def _exit(rc):
"""
Immediately terminates this process with the given return code.
This function is mainly here to be mocked out in UTs.
"""
os._exit(rc) # pragma nocover
| {
"content_hash": "29fd197829ac292d647edabfe54a9e58",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 79,
"avg_line_length": 38.87938288920056,
"alnum_prop": 0.6006276829840194,
"repo_name": "TrimBiggs/calico",
"id": "49d716fbed4407b6147b7e5a4d676727138a91f5",
"size": "28383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calico/felix/actor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "853711"
},
{
"name": "Shell",
"bytes": "13082"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import datetime
import django
from django.contrib.admin.templatetags.admin_list import ResultList, result_headers
from django.contrib.admin.utils import display_for_field, display_for_value, lookup_field
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.utils import flatatt
from django.template import Library
from django.template.loader import get_template
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
register = Library()
def items_for_result(view, result):
"""
Generates the actual list of data.
"""
modeladmin = view.model_admin
for field_name in view.list_display:
empty_value_display = modeladmin.get_empty_value_display(field_name)
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, modeladmin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(
attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean or not value:
allow_tags = True
if django.VERSION >= (1, 9):
result_repr = display_for_value(
value, empty_value_display, boolean)
else:
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
if django.VERSION >= (1, 9):
result_repr = display_for_field(
value, f, empty_value_display)
else:
result_repr = display_for_field(value, f)
if isinstance(f, (
models.DateField, models.TimeField, models.ForeignKey)
):
row_classes.append('nowrap')
if force_text(result_repr) == '':
result_repr = mark_safe(' ')
row_classes.extend(
modeladmin.get_extra_class_names_for_field_col(field_name, result))
row_attrs_dict = modeladmin.get_extra_attrs_for_field_col(
field_name, result)
row_attrs_dict['class'] = ' ' . join(row_classes)
row_attrs = flatatt(row_attrs_dict)
yield format_html('<td{}>{}</td>', row_attrs, result_repr)
def results(view, object_list):
for item in object_list:
yield ResultList(None, items_for_result(view, item))
@register.inclusion_tag("modeladmin/includes/result_list.html",
takes_context=True)
def result_list(context):
"""
Displays the headers and data list together
"""
view = context['view']
object_list = context['object_list']
headers = list(result_headers(view))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
context.update({
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(view, object_list))})
return context
@register.simple_tag
def pagination_link_previous(current_page, view):
if current_page.has_previous():
previous_page_number0 = current_page.previous_page_number() - 1
return format_html(
'<li class="prev"><a href="%s" class="icon icon-arrow-left">%s'
'</a></li>' %
(view.get_query_string({view.PAGE_VAR: previous_page_number0}),
_('Previous'))
)
return ''
@register.simple_tag
def pagination_link_next(current_page, view):
if current_page.has_next():
next_page_number0 = current_page.next_page_number() - 1
return format_html(
'<li class="next"><a href="%s" class="icon icon-arrow-right-after"'
'>%s</a></li>' %
(view.get_query_string({view.PAGE_VAR: next_page_number0}),
_('Next'))
)
return ''
@register.inclusion_tag(
"modeladmin/includes/search_form.html", takes_context=True)
def search_form(context):
context.update({'search_var': context['view'].SEARCH_VAR})
return context
@register.simple_tag
def admin_list_filter(view, spec):
template_name = spec.template
if template_name == 'admin/filter.html':
template_name = 'modeladmin/includes/filter.html'
tpl = get_template(template_name)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(view)),
'spec': spec,
})
@register.inclusion_tag(
"modeladmin/includes/result_row.html", takes_context=True)
def result_row_display(context, index):
obj = context['object_list'][index]
view = context['view']
row_attrs_dict = view.model_admin.get_extra_attrs_for_row(obj, context)
row_attrs_dict['data-object-pk'] = obj.pk
odd_or_even = 'odd' if (index % 2 == 0) else 'even'
if 'class' in row_attrs_dict:
row_attrs_dict['class'] += ' %s' % odd_or_even
else:
row_attrs_dict['class'] = odd_or_even
context.update({
'obj': obj,
'row_attrs': mark_safe(flatatt(row_attrs_dict)),
'action_buttons': view.get_buttons_for_obj(obj),
})
return context
@register.inclusion_tag(
"modeladmin/includes/result_row_value.html", takes_context=True)
def result_row_value_display(context, index):
add_action_buttons = False
item = context['item']
closing_tag = mark_safe(item[-5:])
request = context['request']
model_admin = context['view'].model_admin
field_name = model_admin.get_list_display(request)[index]
if field_name == model_admin.get_list_display_add_buttons(request):
add_action_buttons = True
item = mark_safe(item[0:-5])
context.update({
'item': item,
'add_action_buttons': add_action_buttons,
'closing_tag': closing_tag,
})
return context
@register.filter
def get_content_type_for_obj(obj):
return obj.__class__._meta.verbose_name
| {
"content_hash": "8187209dd41c8b7b5cae405446d7182c",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 89,
"avg_line_length": 35.54081632653061,
"alnum_prop": 0.5970427792133218,
"repo_name": "chrxr/wagtail",
"id": "c216a82940ed1d4470a8e484a6e59d54bc4858d6",
"size": "6966",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/contrib/modeladmin/templatetags/modeladmin_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "177912"
},
{
"name": "HTML",
"bytes": "306962"
},
{
"name": "JavaScript",
"bytes": "123857"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2703800"
},
{
"name": "Shell",
"bytes": "7597"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
"""The error object.
:param error: Error.
:type error: ~azure.mgmt.resource.managementgroups.models.ErrorDetails
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetails'},
}
def __init__(self, error=None):
super(ErrorResponse, self).__init__()
self.error = error
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
| {
"content_hash": "1cea87edd169f394f10b678c53ae0cc7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 99,
"avg_line_length": 27.633333333333333,
"alnum_prop": 0.6755126658624849,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "d4af1d388d68ee2114db0e15d458065bc421a7b0",
"size": "1303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-resource/azure/mgmt/resource/managementgroups/models/error_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.v2",
manifest={
"EncryptionConfiguration",
},
)
class EncryptionConfiguration(proto.Message):
r"""
Attributes:
kms_key_name (google.protobuf.wrappers_pb2.StringValue):
Optional. Describes the Cloud KMS encryption
key that will be used to protect destination
BigQuery table. The BigQuery Service Account
associated with your project requires access to
this encryption key.
"""
kms_key_name = proto.Field(
proto.MESSAGE,
number=1,
message=wrappers_pb2.StringValue,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "9776210d914d212be090129611662dbc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 24.21212121212121,
"alnum_prop": 0.6382978723404256,
"repo_name": "googleapis/python-bigquery",
"id": "9f57acb7c1f5b94b85b222e7dbf1bc8c3b71ac3a",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/bigquery_v2/types/encryption_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2520564"
},
{
"name": "Shell",
"bytes": "31939"
}
],
"symlink_target": ""
} |
'''
These files were generated from the spinn3r protobuf spec used in
their custom "protostream" format
'''
| {
"content_hash": "22265d11592a4c921418ed4f1e8ab19b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 65,
"avg_line_length": 27,
"alnum_prop": 0.7685185185185185,
"repo_name": "trec-kba/streamcorpus-pipeline",
"id": "42b973a58beca089f97a9ca0ae39ce1a727b32f7",
"size": "108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streamcorpus_pipeline/_spinn3r/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "855862"
},
{
"name": "Makefile",
"bytes": "1559"
},
{
"name": "Python",
"bytes": "582428"
},
{
"name": "Roff",
"bytes": "35618"
},
{
"name": "Shell",
"bytes": "5107"
}
],
"symlink_target": ""
} |
"""Run the main function"""
if __name__ == '__main__':
main()
def main():
"""The main function"""
print nicks_add(10, 3)
def nicks_add(number_one, number_two):
"""Adds a and b together
number_one -- a number
number_two -- another number
return -- number_one + number_two
"""
return number_one + number_two
| {
"content_hash": "6556ff26e2f9f38ce377a8fe22ed0e9a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 38,
"avg_line_length": 22.933333333333334,
"alnum_prop": 0.5872093023255814,
"repo_name": "UFSEC/vim-tutorial",
"id": "acaca263a2eafe5e7bf3f6e5cc50189779c4736f",
"size": "344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "syntastic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "2096"
},
{
"name": "Python",
"bytes": "344"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import sys
import string, random
import types
from hashlib import md5
from django.http import HttpResponse
from django.forms.models import model_to_dict
from django.core.cache import cache
import decimal
import re
reload(sys)
sys.setdefaultencoding("utf-8")
Logger = logging.getLogger("normal")
from django.db import connection, transaction
# 重写构造json类,遇到日期特殊处理
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime("%Y-%m-%d")
elif isinstance(obj, datetime.time):
return obj.strftime('%H:%M:%S')
elif isinstance(obj,decimal.Decimal):
return float(obj)
else:
return json.JSONEncoder.default(self, obj)
def my_response(code, msg, content="", status_code=200, content_type="text/json"):
if code != 0:
Logger.info("request handler error, return data is:: {}".format(msg))
data = json.dumps({"code": code, "msg": msg, "content": content})
response = HttpResponse(data, status=status_code, content_type=content_type)
response[
"Access-Control-Allow-Origin"] = "*,http://localhost:8080,http://127.0.0.1:8080,http://localhost,http://127.0.0.1" # 如果要所有访问则属性设置为*,但不合适
response["Access-Control-Allow-Headers"] = "Access-Control-Allow-Origin, x-requested-with, content-type"
return response
def queryset_to_dict(queryset, query_field):
'''
只返回query_field中存在的字段,防止数据泄露
'''
output_list = []
for query in queryset:
query = model_to_dict(query)
job_dict = {}
for field in query_field:
job_dict[field] = query.get(field, '')
output_list.append(job_dict)
return output_list
def dict_to_json(data_list):
data = json.dumps(data_list, cls=CJsonEncoder)
return data
def get_random():
'''
随机码生成
'''
base = string.ascii_letters + "0123456789" + "!@#$%^&*<>?~{}"
src = "".join(random.sample(base, 16))
m5 = md5()
m5.update(src)
return m5.hexdigest()
def getMondaySunday():
'''
获取当前日期的星期一及星期填的datetime
'''
today = datetime.date.today()
Sunday = today + datetime.timedelta(6 - today.weekday())
Monday = today + datetime.timedelta(-today.weekday())
return (Monday,Sunday)
def get_day_of_week():
'''
获取当前日期的周数
'''
today = datetime.date.today()
day_of_tuple=today.isocalendar() #返回类似(2017, 22, 3)
result=str(day_of_tuple[0])+"-"+str(day_of_tuple[1])
return (result)
def fetch_data(sql):
'''
通过sql 获取数据,返回元祖
'''
with connection.cursor() as cursor:
cursor.execute(sql)
col_names = [desc[0] for desc in cursor.description]
sql_result = cursor.fetchall()
results = []
for row in sql_result:
if row is None:
break
results.append(dict(zip(col_names, row)))
return results
def get_user_id(request):
'''
从缓存中获取当前用户的userid
'''
sid=request.COOKIES.get("sid",'')
user_object=cache.get(sid)
try:
user_id=user_object.get("user_id")
except:
user_id=-1
return user_id
def get_first_day(weekflag):
'''
根据周数获得每周的起始和结束日期,传入的值例如“2017-01-5周”
'''
yearnum = weekflag[0:4] #取到年份
weeknum = weekflag[5:7] #取到周
stryearstart = yearnum +'0101' #当年第一天
yearstart = datetime.datetime.strptime(stryearstart,'%Y%m%d') #格式化为日期格式
yearstartcalendarmsg = yearstart.isocalendar() #当年第一天的周信息
yearstartweek = yearstartcalendarmsg[1]
yearstartweekday = yearstartcalendarmsg[2]
yearstartyear = yearstartcalendarmsg[0]
if yearstartyear < int (yearnum):
daydelat = (8-int(yearstartweekday))+(int(weeknum)-1)*7
else :
daydelat = (8-int(yearstartweekday))+(int(weeknum)-2)*7
first_day = (yearstart+datetime.timedelta(days=daydelat)).date()
last_day =first_day+datetime.timedelta(days=6)
start_date=first_day.strftime("%Y-%m-%d")
end_date=last_day.strftime("%Y-%m-%d")
return (start_date,end_date)
def day_of_week(source):
'''
计算某一天是星期几,输入格式为 2017-1-1
'''
week_list=['星期日','星期一','星期二','星期三','星期四','星期五','星期六']
__match=re.compile('^\d{4}-\d{1,2}-\d{1,2}').match(source)
if isinstance(source, datetime.datetime):
date=source
elif isinstance(source, datetime.date):
date=source
elif __match:
source=__match.group()
date=datetime.datetime.strptime(source,"%Y-%m-%d")
week_id=int(date.strftime("%w"))
which_day=week_list[week_id]
return which_day
| {
"content_hash": "6a61cd9b30c7f071c53c3f6cc25dbbc4",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 145,
"avg_line_length": 27.410404624277458,
"alnum_prop": 0.6244200759173345,
"repo_name": "llfccc/weekly",
"id": "39d187488a388039c2f8951c8dab8e2966c1a4ef",
"size": "5162",
"binary": false,
"copies": "1",
"ref": "refs/heads/publish",
"path": "weekly/utils/tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "137646"
},
{
"name": "HTML",
"bytes": "933"
},
{
"name": "JavaScript",
"bytes": "135488"
},
{
"name": "Jupyter Notebook",
"bytes": "15293"
},
{
"name": "Nginx",
"bytes": "3028"
},
{
"name": "Python",
"bytes": "125464"
},
{
"name": "Shell",
"bytes": "3126"
},
{
"name": "Vue",
"bytes": "183359"
}
],
"symlink_target": ""
} |
"""
This module brings together a variety of NLTK functionality for
text analysis, and provides simple, interactive interfaces.
Functionality includes: concordancing, collocation discovery,
regular expression search over tokenized strings, and
distributional similarity.
"""
from __future__ import print_function, division, unicode_literals
from math import log
from collections import defaultdict
from functools import reduce
from itertools import islice
import re
from nltk.probability import FreqDist, LidstoneProbDist
from nltk.probability import ConditionalFreqDist as CFD
from nltk.util import tokenwrap, LazyConcatenation
from nltk.metrics import f_measure, BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.compat import python_2_unicode_compatible, text_type, Counter
class ContextIndex(object):
"""
A bidirectional index between words and their 'contexts' in a text.
The context of a word is usually defined to be the words that occur
in a fixed window around the word; but other definitions may also
be used by providing a custom context function.
"""
@staticmethod
def _default_context(tokens, i):
"""One left token and one right token, normalized to lowercase"""
left = (tokens[i-1].lower() if i != 0 else '*START*')
right = (tokens[i+1].lower() if i != len(tokens) - 1 else '*END*')
return (left, right)
def __init__(self, tokens, context_func=None, filter=None, key=lambda x:x):
self._key = key
self._tokens = tokens
if context_func:
self._context_func = context_func
else:
self._context_func = self._default_context
if filter:
tokens = [t for t in tokens if filter(t)]
self._word_to_contexts = CFD((self._key(w), self._context_func(tokens, i))
for i, w in enumerate(tokens))
self._context_to_words = CFD((self._context_func(tokens, i), self._key(w))
for i, w in enumerate(tokens))
def tokens(self):
"""
:rtype: list(str)
:return: The document that this context index was
created from.
"""
return self._tokens
def word_similarity_dict(self, word):
"""
Return a dictionary mapping from words to 'similarity scores,'
indicating how often these two words occur in the same
context.
"""
word = self._key(word)
word_contexts = set(self._word_to_contexts[word])
scores = {}
for w, w_contexts in self._word_to_contexts.items():
scores[w] = f_measure(word_contexts, set(w_contexts))
return scores
def similar_words(self, word, n=20):
scores = defaultdict(int)
for c in self._word_to_contexts[self._key(word)]:
for w in self._context_to_words[c]:
if w != word:
scores[w] += self._context_to_words[c][word] * self._context_to_words[c][w]
return sorted(scores, key=scores.get, reverse=True)[:n]
def common_contexts(self, words, fail_on_unknown=False):
"""
Find contexts where the specified words can all appear; and
return a frequency distribution mapping each context to the
number of times that context was used.
:param words: The words used to seed the similarity search
:type words: str
:param fail_on_unknown: If true, then raise a value error if
any of the given words do not occur at all in the index.
"""
words = [self._key(w) for w in words]
contexts = [set(self._word_to_contexts[w]) for w in words]
empty = [words[i] for i in range(len(words)) if not contexts[i]]
common = reduce(set.intersection, contexts)
if empty and fail_on_unknown:
raise ValueError("The following word(s) were not found:",
" ".join(words))
elif not common:
# nothing in common -- just return an empty freqdist.
return FreqDist()
else:
fd = FreqDist(c for w in words
for c in self._word_to_contexts[w]
if c in common)
return fd
@python_2_unicode_compatible
class ConcordanceIndex(object):
"""
An index that can be used to look up the offset locations at which
a given word occurs in a document.
"""
def __init__(self, tokens, key=lambda x:x):
"""
Construct a new concordance index.
:param tokens: The document (list of tokens) that this
concordance index was created from. This list can be used
to access the context of a given word occurrence.
:param key: A function that maps each token to a normalized
version that will be used as a key in the index. E.g., if
you use ``key=lambda s:s.lower()``, then the index will be
case-insensitive.
"""
self._tokens = tokens
"""The document (list of tokens) that this concordance index
was created from."""
self._key = key
"""Function mapping each token to an index key (or None)."""
self._offsets = defaultdict(list)
"""Dictionary mapping words (or keys) to lists of offset
indices."""
# Initialize the index (self._offsets)
for index, word in enumerate(tokens):
word = self._key(word)
self._offsets[word].append(index)
def tokens(self):
"""
:rtype: list(str)
:return: The document that this concordance index was
created from.
"""
return self._tokens
def offsets(self, word):
"""
:rtype: list(int)
:return: A list of the offset positions at which the given
word occurs. If a key function was specified for the
index, then given word's key will be looked up.
"""
word = self._key(word)
return self._offsets[word]
def __repr__(self):
return '<ConcordanceIndex for %d tokens (%d types)>' % (
len(self._tokens), len(self._offsets))
def print_concordance(self, word, width=75, lines=25):
"""
Print a concordance for ``word`` with the specified context window.
:param word: The target word
:type word: str
:param width: The width of each line, in characters (default=80)
:type width: int
:param lines: The number of lines to display (default=25)
:type lines: int
"""
half_width = (width - len(word) - 2) // 2
context = width // 4 # approx number of words of context
offsets = self.offsets(word)
if offsets:
lines = min(lines, len(offsets))
print("Displaying %s of %s matches:" % (lines, len(offsets)))
for i in offsets:
if lines <= 0:
break
left = (' ' * half_width +
' '.join(self._tokens[i-context:i]))
right = ' '.join(self._tokens[i+1:i+context])
left = left[-half_width:]
right = right[:half_width]
print(left, self._tokens[i], right)
lines -= 1
else:
print("No matches")
class TokenSearcher(object):
"""
A class that makes it easier to use regular expressions to search
over tokenized strings. The tokenized string is converted to a
string where tokens are marked with angle brackets -- e.g.,
``'<the><window><is><still><open>'``. The regular expression
passed to the ``findall()`` method is modified to treat angle
brackets as non-capturing parentheses, in addition to matching the
token boundaries; and to have ``'.'`` not match the angle brackets.
"""
def __init__(self, tokens):
self._raw = ''.join('<'+w+'>' for w in tokens)
def findall(self, regexp):
"""
Find instances of the regular expression in the text.
The text is a list of tokens, and a regexp pattern to match
a single token must be surrounded by angle brackets. E.g.
>>> from nltk.text import TokenSearcher
>>> print('hack'); from nltk.book import text1, text5, text9
hack...
>>> text5.findall("<.*><.*><bro>")
you rule bro; telling you bro; u twizted bro
>>> text1.findall("<a>(<.*>)<man>")
monied; nervous; dangerous; white; white; white; pious; queer; good;
mature; white; Cape; great; wise; wise; butterless; white; fiendish;
pale; furious; better; certain; complete; dismasted; younger; brave;
brave; brave; brave
>>> text9.findall("<th.*>{3,}")
thread through those; the thought that; that the thing; the thing
that; that that thing; through these than through; them that the;
through the thick; them that they; thought that the
:param regexp: A regular expression
:type regexp: str
"""
# preprocess the regular expression
regexp = re.sub(r'\s', '', regexp)
regexp = re.sub(r'<', '(?:<(?:', regexp)
regexp = re.sub(r'>', ')>)', regexp)
regexp = re.sub(r'(?<!\\)\.', '[^>]', regexp)
# perform the search
hits = re.findall(regexp, self._raw)
# Sanity check
for h in hits:
if not h.startswith('<') and h.endswith('>'):
raise ValueError('Bad regexp for TokenSearcher.findall')
# postprocess the output
hits = [h[1:-1].split('><') for h in hits]
return hits
@python_2_unicode_compatible
class Text(object):
"""
A wrapper around a sequence of simple (string) tokens, which is
intended to support initial exploration of texts (via the
interactive console). Its methods perform a variety of analyses
on the text's contexts (e.g., counting, concordancing, collocation
discovery), and display the results. If you wish to write a
program which makes use of these analyses, then you should bypass
the ``Text`` class, and use the appropriate analysis function or
class directly instead.
A ``Text`` is typically initialized from a given document or
corpus. E.g.:
>>> import nltk.corpus
>>> from nltk.text import Text
>>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt'))
"""
# This defeats lazy loading, but makes things faster. This
# *shouldn't* be necessary because the corpus view *should* be
# doing intelligent caching, but without this it's running slow.
# Look into whether the caching is working correctly.
_COPY_TOKENS = True
def __init__(self, tokens, name=None):
"""
Create a Text object.
:param tokens: The source text.
:type tokens: sequence of str
"""
if self._COPY_TOKENS:
tokens = list(tokens)
self.tokens = tokens
if name:
self.name = name
elif ']' in tokens[:20]:
end = tokens[:20].index(']')
self.name = " ".join(text_type(tok) for tok in tokens[1:end])
else:
self.name = " ".join(text_type(tok) for tok in tokens[:8]) + "..."
#////////////////////////////////////////////////////////////
# Support item & slice access
#////////////////////////////////////////////////////////////
def __getitem__(self, i):
if isinstance(i, slice):
return self.tokens[i.start:i.stop]
else:
return self.tokens[i]
def __len__(self):
return len(self.tokens)
#////////////////////////////////////////////////////////////
# Interactive console methods
#////////////////////////////////////////////////////////////
def concordance(self, word, width=79, lines=25):
"""
Print a concordance for ``word`` with the specified context window.
Word matching is not case-sensitive.
:seealso: ``ConcordanceIndex``
"""
if '_concordance_index' not in self.__dict__:
#print("Building index...")
self._concordance_index = ConcordanceIndex(self.tokens,
key=lambda s:s.lower())
self._concordance_index.print_concordance(word, width, lines)
def collocations(self, num=20, window_size=2):
"""
Print collocations derived from the text, ignoring stopwords.
:seealso: find_collocations
:param num: The maximum number of collocations to print.
:type num: int
:param window_size: The number of tokens spanned by a collocation (default=2)
:type window_size: int
"""
if not ('_collocations' in self.__dict__ and self._num == num and self._window_size == window_size):
self._num = num
self._window_size = window_size
#print("Building collocations list")
from nltk.corpus import stopwords
ignored_words = stopwords.words('english')
finder = BigramCollocationFinder.from_words(self.tokens, window_size)
finder.apply_freq_filter(2)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
bigram_measures = BigramAssocMeasures()
self._collocations = finder.nbest(bigram_measures.likelihood_ratio, num)
colloc_strings = [w1+' '+w2 for w1, w2 in self._collocations]
print(tokenwrap(colloc_strings, separator="; "))
def count(self, word):
"""
Count the number of times this word appears in the text.
"""
return self.tokens.count(word)
def index(self, word):
"""
Find the index of the first occurrence of the word in the text.
"""
return self.tokens.index(word)
def readability(self, method):
# code from nltk_contrib.readability
raise NotImplementedError
def similar(self, word, num=20):
"""
Distributional similarity: find other words which appear in the
same contexts as the specified word; list most similar words first.
:param word: The word used to seed the similarity search
:type word: str
:param num: The number of words to generate (default=20)
:type num: int
:seealso: ContextIndex.similar_words()
"""
if '_word_context_index' not in self.__dict__:
#print('Building word-context index...')
self._word_context_index = ContextIndex(self.tokens,
filter=lambda x:x.isalpha(),
key=lambda s:s.lower())
# words = self._word_context_index.similar_words(word, num)
word = word.lower()
wci = self._word_context_index._word_to_contexts
if word in wci.conditions():
contexts = set(wci[word])
fd = Counter(w for w in wci.conditions() for c in wci[w]
if c in contexts and not w == word)
words = [w for w, _ in fd.most_common(num)]
print(tokenwrap(words))
else:
print("No matches")
def common_contexts(self, words, num=20):
"""
Find contexts where the specified words appear; list
most frequent common contexts first.
:param word: The word used to seed the similarity search
:type word: str
:param num: The number of words to generate (default=20)
:type num: int
:seealso: ContextIndex.common_contexts()
"""
if '_word_context_index' not in self.__dict__:
#print('Building word-context index...')
self._word_context_index = ContextIndex(self.tokens,
key=lambda s:s.lower())
try:
fd = self._word_context_index.common_contexts(words, True)
if not fd:
print("No common contexts were found")
else:
ranked_contexts = [w for w, _ in fd.most_common(num)]
print(tokenwrap(w1+"_"+w2 for w1,w2 in ranked_contexts))
except ValueError as e:
print(e)
def dispersion_plot(self, words):
"""
Produce a plot showing the distribution of the words through the text.
Requires pylab to be installed.
:param words: The words to be plotted
:type words: list(str)
:seealso: nltk.draw.dispersion_plot()
"""
from nltk.draw import dispersion_plot
dispersion_plot(self, words)
def generate(self, words):
"""
Issues a reminder to users following the book online
"""
import warnings
warnings.warn('The generate() method is no longer available.', DeprecationWarning)
def plot(self, *args):
"""
See documentation for FreqDist.plot()
:seealso: nltk.prob.FreqDist.plot()
"""
self.vocab().plot(*args)
def vocab(self):
"""
:seealso: nltk.prob.FreqDist
"""
if "_vocab" not in self.__dict__:
#print("Building vocabulary index...")
self._vocab = FreqDist(self)
return self._vocab
def findall(self, regexp):
"""
Find instances of the regular expression in the text.
The text is a list of tokens, and a regexp pattern to match
a single token must be surrounded by angle brackets. E.g.
>>> print('hack'); from nltk.book import text1, text5, text9
hack...
>>> text5.findall("<.*><.*><bro>")
you rule bro; telling you bro; u twizted bro
>>> text1.findall("<a>(<.*>)<man>")
monied; nervous; dangerous; white; white; white; pious; queer; good;
mature; white; Cape; great; wise; wise; butterless; white; fiendish;
pale; furious; better; certain; complete; dismasted; younger; brave;
brave; brave; brave
>>> text9.findall("<th.*>{3,}")
thread through those; the thought that; that the thing; the thing
that; that that thing; through these than through; them that the;
through the thick; them that they; thought that the
:param regexp: A regular expression
:type regexp: str
"""
if "_token_searcher" not in self.__dict__:
self._token_searcher = TokenSearcher(self)
hits = self._token_searcher.findall(regexp)
hits = [' '.join(h) for h in hits]
print(tokenwrap(hits, "; "))
#////////////////////////////////////////////////////////////
# Helper Methods
#////////////////////////////////////////////////////////////
_CONTEXT_RE = re.compile('\w+|[\.\!\?]')
def _context(self, tokens, i):
"""
One left & one right token, both case-normalized. Skip over
non-sentence-final punctuation. Used by the ``ContextIndex``
that is created for ``similar()`` and ``common_contexts()``.
"""
# Left context
j = i-1
while j>=0 and not self._CONTEXT_RE.match(tokens[j]):
j -= 1
left = (tokens[j] if j != 0 else '*START*')
# Right context
j = i+1
while j<len(tokens) and not self._CONTEXT_RE.match(tokens[j]):
j += 1
right = (tokens[j] if j != len(tokens) else '*END*')
return (left, right)
#////////////////////////////////////////////////////////////
# String Display
#////////////////////////////////////////////////////////////
def __str__(self):
return '<Text: %s>' % self.name
def __repr__(self):
return '<Text: %s>' % self.name
# Prototype only; this approach will be slow to load
class TextCollection(Text):
"""A collection of texts, which can be loaded with list of texts, or
with a corpus consisting of one or more texts, and which supports
counting, concordancing, collocation discovery, etc. Initialize a
TextCollection as follows:
>>> import nltk.corpus
>>> from nltk.text import TextCollection
>>> print('hack'); from nltk.book import text1, text2, text3
hack...
>>> gutenberg = TextCollection(nltk.corpus.gutenberg)
>>> mytexts = TextCollection([text1, text2, text3])
Iterating over a TextCollection produces all the tokens of all the
texts in order.
"""
def __init__(self, source):
if hasattr(source, 'words'): # bridge to the text corpus reader
source = [source.words(f) for f in source.fileids()]
self._texts = source
Text.__init__(self, LazyConcatenation(source))
self._idf_cache = {}
def tf(self, term, text):
""" The frequency of the term in text. """
return text.count(term) / len(text)
def idf(self, term):
""" The number of texts in the corpus divided by the
number of texts that the term appears in.
If a term does not appear in the corpus, 0.0 is returned. """
# idf values are cached for performance.
idf = self._idf_cache.get(term)
if idf is None:
matches = len([True for text in self._texts if term in text])
# FIXME Should this raise some kind of error instead?
idf = (log(len(self._texts) / matches) if matches else 0.0)
self._idf_cache[term] = idf
return idf
def tf_idf(self, term, text):
return self.tf(term, text) * self.idf(term)
def demo():
from nltk.corpus import brown
text = Text(brown.words(categories='news'))
print(text)
print()
print("Concordance:")
text.concordance('news')
print()
print("Distributionally similar words:")
text.similar('news')
print()
print("Collocations:")
text.collocations()
print()
#print("Automatically generated text:")
#text.generate()
#print()
print("Dispersion plot:")
text.dispersion_plot(['news', 'report', 'said', 'announced'])
print()
print("Vocabulary plot:")
text.plot(50)
print()
print("Indexing:")
print("text[3]:", text[3])
print("text[3:5]:", text[3:5])
print("text.vocab()['news']:", text.vocab()['news'])
if __name__ == '__main__':
demo()
__all__ = ["ContextIndex",
"ConcordanceIndex",
"TokenSearcher",
"Text",
"TextCollection"]
| {
"content_hash": "07841bbe6125ac27846f7225d9620a84",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 108,
"avg_line_length": 37.00985221674877,
"alnum_prop": 0.5715870269310972,
"repo_name": "xiaozhuchacha/OpenBottle",
"id": "6fb9fc44e3cd457d00209cc6ad8ca5c400d76ad1",
"size": "22773",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "action_earley_srv/scripts/nltk/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17514"
},
{
"name": "C++",
"bytes": "153149"
},
{
"name": "CMake",
"bytes": "175576"
},
{
"name": "Component Pascal",
"bytes": "66739"
},
{
"name": "Java",
"bytes": "728775"
},
{
"name": "MATLAB",
"bytes": "15776"
},
{
"name": "Makefile",
"bytes": "160500"
},
{
"name": "Python",
"bytes": "8885703"
},
{
"name": "Shell",
"bytes": "10157"
}
],
"symlink_target": ""
} |
def extractYamiTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Tensei Shoujo no Rirekisho' in item['tags']:
return buildReleaseMessageWithType(item, 'Tensei Shoujo no Rirekisho', vol, chp, frag=frag, postfix=postfix)
if 'shoujo resume' in item['tags']:
return buildReleaseMessageWithType(item, 'Tensei Shoujo no Rirekisho', vol, chp, frag=frag, postfix=postfix)
if 'Ouroboros Record' in item['tags']:
return buildReleaseMessageWithType(item, 'Ouroboros Record', vol, chp, frag=frag, postfix=postfix)
if 'Light Beyond' in item['tags']:
return buildReleaseMessageWithType(item, 'Light Beyond', vol, chp, frag=frag, postfix=postfix)
if 'otome game ga rokkushume' in item['tags']:
return buildReleaseMessageWithType(item, 'Otome Game Rokkushuume, Automode ga Kiremashita', vol, chp, frag=frag, postfix=postfix)
return False | {
"content_hash": "3fcfe09a40befa2f660ca52cfcd0b2b6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 131,
"avg_line_length": 54.388888888888886,
"alnum_prop": 0.7487231869254342,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b64e121644bfa90dfd88fb6b927a0f8b21267136",
"size": "979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractYamiTranslations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import re
from django.template import Library, Node, TemplateSyntaxError
from django.template.base import token_kwargs
from django.urls import Resolver404, resolve
from django.utils.html import format_html
register = Library()
class MenuItemNode(Node):
def __init__(self, nodelist, pattern, kwargs):
self.nodelist = nodelist
self.pattern = pattern
self.kwargs = kwargs
def render(self, context):
pattern = self.pattern.resolve(context)
classes = []
if 'class' in self.kwargs:
classes = self.kwargs['class'].resolve(context).split()
try:
func = resolve(context['request'].path).func
except Resolver404:
return ''
match = func.__module__ + '.' + func.__name__
if re.search(pattern, match):
classes.append('active')
if classes:
open_tag = format_html('<li class="{}">', ' '.join(classes))
else:
open_tag = format_html('<li>')
content = self.nodelist.render(context)
close_tag = format_html('</li>')
return open_tag + content + close_tag
@register.tag
def menuitem(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument, a pattern matching a view name." % bits[0])
pattern = parser.compile_filter(bits[1])
kwargs = token_kwargs(bits[2:], parser)
nodelist = parser.parse(('endmenuitem',))
parser.delete_first_token()
return MenuItemNode(nodelist, pattern, kwargs)
| {
"content_hash": "d6a2e196fc22ecb1470eb006501512b6",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 112,
"avg_line_length": 29.754716981132077,
"alnum_prop": 0.6201648700063411,
"repo_name": "Inter-Actief/alexia",
"id": "3a081670c8619a8dbe9b2b1bb3b4d9935ec6801d",
"size": "1577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alexia/apps/general/templatetags/menuitem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17029"
},
{
"name": "HTML",
"bytes": "179103"
},
{
"name": "JavaScript",
"bytes": "511580"
},
{
"name": "Python",
"bytes": "372488"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from freediscovery.text import FeatureVectorizer
from freediscovery.cluster import _ClusteringWrapper, select_top_words
from freediscovery.lsi import _LSIWrapper
from .run_suite import check_cache
NCLUSTERS = 2
def fd_setup():
basename = os.path.dirname(__file__)
cache_dir = check_cache()
np.random.seed(1)
data_dir = os.path.join(basename, "..", "data", "ds_001", "raw")
n_features = 110000
fe = FeatureVectorizer(cache_dir=cache_dir)
dsid = fe.preprocess(data_dir, file_pattern='.*\d.txt',
n_features=n_features, use_hashing=False,
stop_words='english',
min_df=0.1, max_df=0.9) # TODO unused variable 'uuid' (overwritten on the next line)
dsid, filenames = fe.transform()
lsi = _LSIWrapper(cache_dir=cache_dir, parent_id=dsid)
lsi.fit_transform(n_components=6)
return cache_dir, dsid, filenames, lsi.mid
def check_cluster_consistency(labels, terms):
assert isinstance(labels, (list, np.ndarray))
assert isinstance(terms, list)
assert len(np.unique(labels)) == len(terms)
@pytest.mark.parametrize('method, use_lsi, args, cl_args',
[['k_means', None, {}, {}],
['k_means', True, {}, {}],
['birch', True, {'threshold': 0.5}, {}],
['ward_hc', True, {'n_neighbors': 5}, {}],
['dbscan', False, {'eps':0.5, 'min_samples': 2}, {}],
['dbscan', True, {'eps':0.5, 'min_samples': 2}, {}],
])
def test_clustering(method, use_lsi, args, cl_args):
cache_dir, uuid, filenames, lsi_id = fd_setup()
np.random.seed(1)
n_top_words = 9
if use_lsi:
parent_id = lsi_id
else:
parent_id = uuid
cat = _ClusteringWrapper(cache_dir=cache_dir, parent_id=parent_id)
cm = getattr(cat, method)
labels, htree = cm(NCLUSTERS, **args)
terms = cat.compute_labels(n_top_words=n_top_words, **cl_args)
mid = cat.mid
if method == 'ward_hc':
assert sorted(htree.keys()) == sorted(['n_leaves', 'n_components', 'children'])
else:
assert htree == {}
if method == 'dbscan':
assert (labels != -1).all()
check_cluster_consistency(labels, terms)
cat.scores(np.random.randint(0, NCLUSTERS-1, size=len(labels)), labels)
# load the model saved to disk
km = cat._load_model()
assert_allclose(labels, km.labels_)
if method != 'dbscan':
# DBSCAN does not take the number of clusters as input
assert len(terms) == NCLUSTERS
assert len(np.unique(labels)) == NCLUSTERS
for el in terms:
assert len(el) == n_top_words
cluster_indices = np.nonzero(labels == 0)
if use_lsi:
# use_lsi=False is not supported for now
terms2 = cat.compute_labels(cluster_indices=cluster_indices, **cl_args)
# 70% of the terms at least should match
if method != 'dbscan':
assert sum([el in terms[0] for el in terms2[0]]) > 0.7*len(terms2[0])
cat2 = _ClusteringWrapper(cache_dir=cache_dir, mid=mid) # make sure we can load it # TODO unused variable
cat.delete()
def test_denrogram_children():
# temporary solution for https://stackoverflow.com/questions/40239956/node-indexing-in-hierarachical-clustering-dendrograms
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
from freediscovery.cluster import _DendrogramChildren
# generate two clusters: a with 10 points, b with 5:
np.random.seed(1)
a = np.random.multivariate_normal([10, 0], [[3, 1], [1, 4]], size=[10,])
b = np.random.multivariate_normal([0, 20], [[3, 1], [1, 4]], size=[5,])
X = np.concatenate((a, b),)
Z = linkage(X, 'ward')
# make distances between pairs of children uniform
# (re-scales the horizontal (distance) axis when plotting)
Z[:, 2] = np.arange(Z.shape[0])+1
ddata = dendrogram(Z, no_plot=True)
dc = _DendrogramChildren(ddata)
idx = 0
# check that we can compute children for all nodes
for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
node_children = dc.query(idx)
idx += 1
# last level node should encompass all samples
assert len(node_children) == X.shape[0]
assert_allclose(sorted(node_children), np.arange(X.shape[0]))
def test_dbscan_noisy_utils():
from freediscovery.cluster.utils import (_dbscan_noisy2unique,
_dbscan_unique2noisy)
from sklearn.metrics import v_measure_score
x_ref = np.array([-1, 0, -1, 1, 1, -1, 0])
y_ref = np.array([2, 0, 3, 1, 1, 4, 0])
y = _dbscan_noisy2unique(x_ref)
assert v_measure_score(y, y_ref) == 1
# check inverse transform
x = _dbscan_unique2noisy(y_ref)
assert v_measure_score(x, x_ref) == 1
def test_binary_linkage2clusters():
from freediscovery.cluster.utils import _binary_linkage2clusters
from sklearn.metrics import v_measure_score
n_samples = 10
linkage = np.array([[1, 2],
[2, 3],
[5, 7],
[6, 9]])
cluster_id = _binary_linkage2clusters(linkage, n_samples)
cluster_id_ref = np.array([0, 1, 1, 1, 2, 3, 4, 3, 5, 4])
assert cluster_id.shape == cluster_id_ref.shape
assert v_measure_score(cluster_id, cluster_id_ref) == 1.0 # i.e. same clusters
def test_merge_clusters():
from freediscovery.cluster.utils import _merge_clusters
X = np.array([[1, 2, 7, 9, 7, 8]]).T
y = _merge_clusters(X)
assert_equal(X, y[:,None])
X_new = np.concatenate((X, X, X, X), axis=1)
y = _merge_clusters(X_new)
assert_equal(X, y[:,None])
X = np.array([[1, 1, 2, 2, 3, 1, 3],
[2, 4, 2, 5, 1, 1, 3]]).T
y = _merge_clusters(X)
assert_equal(y, [1, 1, 1, 1, 3, 1, 3])
def test_select_top_words():
words_list = ['apple', 'apples', 'test', 'go']
n_words = 2
res = select_top_words(words_list, n=n_words)
assert len(res) == n_words
assert res == ['apple', 'test']
| {
"content_hash": "8094a12dd581e0adeedbb7aa50ed26c6",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 127,
"avg_line_length": 34.101604278074866,
"alnum_prop": 0.5960482985729967,
"repo_name": "kcompher/FreeDiscovUI",
"id": "879768afee2c5dbc520d25ac3e5e78beb404142f",
"size": "6419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freediscovery/tests/test_cluster.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "404"
},
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Nginx",
"bytes": "451"
},
{
"name": "Python",
"bytes": "333007"
},
{
"name": "Shell",
"bytes": "3721"
}
],
"symlink_target": ""
} |
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as sa_orm_exc
from webob import exc as web_exc
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.extensions import networkgw
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
NETWORK_ID = 'network_id'
SEGMENTATION_TYPE = 'segmentation_type'
SEGMENTATION_ID = 'segmentation_id'
ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
SEGMENTATION_TYPE,
SEGMENTATION_ID))
# Constants for gateway device operational status
STATUS_UNKNOWN = "UNKNOWN"
STATUS_ERROR = "ERROR"
STATUS_ACTIVE = "ACTIVE"
STATUS_DOWN = "DOWN"
class GatewayInUse(exceptions.InUse):
message = _("Network Gateway '%(gateway_id)s' still has active mappings "
"with one or more neutron networks.")
class GatewayNotFound(exceptions.NotFound):
message = _("Network Gateway %(gateway_id)s could not be found")
class GatewayDeviceInUse(exceptions.InUse):
message = _("Network Gateway Device '%(device_id)s' is still used by "
"one or more network gateways.")
class GatewayDeviceNotFound(exceptions.NotFound):
message = _("Network Gateway Device %(device_id)s could not be found.")
class NetworkGatewayPortInUse(exceptions.InUse):
message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
"therefore cannot be deleted directly via the port API.")
class GatewayConnectionInUse(exceptions.InUse):
message = _("The specified mapping '%(mapping)s' is already in use on "
"network gateway '%(gateway_id)s'.")
class MultipleGatewayConnections(exceptions.NeutronException):
message = _("Multiple network connections found on '%(gateway_id)s' "
"with provided criteria.")
class GatewayConnectionNotFound(exceptions.NotFound):
message = _("The connection %(network_mapping_info)s was not found on the "
"network gateway '%(network_gateway_id)s'")
class NetworkGatewayUnchangeable(exceptions.InUse):
message = _("The network gateway %(gateway_id)s "
"cannot be updated or deleted")
# Add exceptions to HTTP Faults mappings
base.FAULT_MAP.update({GatewayInUse: web_exc.HTTPConflict,
NetworkGatewayPortInUse: web_exc.HTTPConflict,
GatewayConnectionInUse: web_exc.HTTPConflict,
GatewayConnectionNotFound: web_exc.HTTPNotFound,
MultipleGatewayConnections: web_exc.HTTPConflict})
class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
"""Defines a connection between a network gateway and a network."""
# We use port_id as the primary key as one can connect a gateway
# to a network in multiple ways (and we cannot use the same port form
# more than a single gateway)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'))
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete='CASCADE'))
segmentation_type = sa.Column(
sa.Enum('flat', 'vlan',
name='networkconnections_segmentation_type'))
segmentation_id = sa.Column(sa.Integer)
__table_args__ = (sa.UniqueConstraint(network_gateway_id,
segmentation_type,
segmentation_id),)
# Also, storing port id comes back useful when disconnecting a network
# from a gateway
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
primary_key=True)
class NetworkGatewayDeviceReference(model_base.BASEV2):
id = sa.Column(sa.String(36), primary_key=True)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'),
primary_key=True)
interface_name = sa.Column(sa.String(64), primary_key=True)
class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
nsx_id = sa.Column(sa.String(36))
# Optional name for the gateway device
name = sa.Column(sa.String(255))
# Transport connector type. Not using enum as range of
# connector types might vary with backend version
connector_type = sa.Column(sa.String(10))
# Transport connector IP Address
connector_ip = sa.Column(sa.String(64))
# operational status
status = sa.Column(sa.String(16))
class NetworkGateway(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Defines the data model for a network gateway."""
name = sa.Column(sa.String(255))
# Tenant id is nullable for this resource
tenant_id = sa.Column(sa.String(36))
default = sa.Column(sa.Boolean())
devices = orm.relationship(NetworkGatewayDeviceReference,
backref='networkgateways',
cascade='all,delete')
network_connections = orm.relationship(NetworkConnection, lazy='joined')
class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
device_resource = networkgw.DEVICE_RESOURCE_NAME
def _get_network_gateway(self, context, gw_id):
try:
gw = self._get_by_id(context, NetworkGateway, gw_id)
except sa_orm_exc.NoResultFound:
raise GatewayNotFound(gateway_id=gw_id)
return gw
def _make_gw_connection_dict(self, gw_conn):
return {'port_id': gw_conn['port_id'],
'segmentation_type': gw_conn['segmentation_type'],
'segmentation_id': gw_conn['segmentation_id']}
def _make_network_gateway_dict(self, network_gateway, fields=None):
device_list = []
for d in network_gateway['devices']:
device_list.append({'id': d['id'],
'interface_name': d['interface_name']})
res = {'id': network_gateway['id'],
'name': network_gateway['name'],
'default': network_gateway['default'],
'devices': device_list,
'tenant_id': network_gateway['tenant_id']}
# Query gateway connections only if needed
if (fields and 'ports' in fields) or not fields:
res['ports'] = [self._make_gw_connection_dict(conn)
for conn in network_gateway.network_connections]
return self._fields(res, fields)
def _set_mapping_info_defaults(self, mapping_info):
if not mapping_info.get('segmentation_type'):
mapping_info['segmentation_type'] = 'flat'
if not mapping_info.get('segmentation_id'):
mapping_info['segmentation_id'] = 0
def _validate_network_mapping_info(self, network_mapping_info):
self._set_mapping_info_defaults(network_mapping_info)
network_id = network_mapping_info.get(NETWORK_ID)
if not network_id:
raise exceptions.InvalidInput(
error_message=_("A network identifier must be specified "
"when connecting a network to a network "
"gateway. Unable to complete operation"))
connection_attrs = set(network_mapping_info.keys())
if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
raise exceptions.InvalidInput(
error_message=(_("Invalid keys found among the ones provided "
"in request body: %(connection_attrs)s."),
connection_attrs))
seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
seg_id = network_mapping_info.get(SEGMENTATION_ID)
if not seg_type and seg_id:
msg = _("In order to specify a segmentation id the "
"segmentation type must be specified as well")
raise exceptions.InvalidInput(error_message=msg)
elif seg_type and seg_type.lower() == 'flat' and seg_id:
msg = _("Cannot specify a segmentation id when "
"the segmentation type is flat")
raise exceptions.InvalidInput(error_message=msg)
return network_id
def _retrieve_gateway_connections(self, context, gateway_id,
mapping_info={}, only_one=False):
filters = {'network_gateway_id': [gateway_id]}
for k, v in mapping_info.iteritems():
if v and k != NETWORK_ID:
filters[k] = [v]
query = self._get_collection_query(context,
NetworkConnection,
filters)
return only_one and query.one() or query.all()
def _unset_default_network_gateways(self, context):
with context.session.begin(subtransactions=True):
context.session.query(NetworkGateway).update(
{NetworkGateway.default: False})
def _set_default_network_gateway(self, context, gw_id):
with context.session.begin(subtransactions=True):
gw = (context.session.query(NetworkGateway).
filter_by(id=gw_id).one())
gw['default'] = True
def prevent_network_gateway_port_deletion(self, context, port):
"""Pre-deletion check.
Ensures a port will not be deleted if is being used by a network
gateway. In that case an exception will be raised.
"""
if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
raise NetworkGatewayPortInUse(port_id=port['id'],
device_owner=port['device_owner'])
def create_network_gateway(self, context, network_gateway):
gw_data = network_gateway[self.gateway_resource]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
with context.session.begin(subtransactions=True):
gw_db = NetworkGateway(
id=gw_data.get('id', uuidutils.generate_uuid()),
tenant_id=tenant_id,
name=gw_data.get('name'))
# Device list is guaranteed to be a valid list
# TODO(salv-orlando): Enforce that gateway device identifiers
# in this list are among the tenant's NSX network gateway devices
# to avoid risk a tenant 'guessing' other tenant's network devices
gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
for device in gw_data['devices']])
context.session.add(gw_db)
LOG.debug(_("Created network gateway with id:%s"), gw_db['id'])
return self._make_network_gateway_dict(gw_db)
def update_network_gateway(self, context, id, network_gateway):
gw_data = network_gateway[self.gateway_resource]
with context.session.begin(subtransactions=True):
gw_db = self._get_network_gateway(context, id)
if gw_db.default:
raise NetworkGatewayUnchangeable(gateway_id=id)
# Ensure there is something to update before doing it
if any([gw_db[k] != gw_data[k] for k in gw_data]):
gw_db.update(gw_data)
LOG.debug(_("Updated network gateway with id:%s"), id)
return self._make_network_gateway_dict(gw_db)
def get_network_gateway(self, context, id, fields=None):
gw_db = self._get_network_gateway(context, id)
return self._make_network_gateway_dict(gw_db, fields)
def delete_network_gateway(self, context, id):
with context.session.begin(subtransactions=True):
gw_db = self._get_network_gateway(context, id)
if gw_db.network_connections:
raise GatewayInUse(gateway_id=id)
if gw_db.default:
raise NetworkGatewayUnchangeable(gateway_id=id)
context.session.delete(gw_db)
LOG.debug(_("Network gateway '%s' was destroyed."), id)
def get_network_gateways(self, context, filters=None, fields=None):
return self._get_collection(context, NetworkGateway,
self._make_network_gateway_dict,
filters=filters, fields=fields)
def connect_network(self, context, network_gateway_id,
network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info)
LOG.debug(_("Connecting network '%(network_id)s' to gateway "
"'%(network_gateway_id)s'"),
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True):
gw_db = self._get_network_gateway(context, network_gateway_id)
tenant_id = self._get_tenant_id_for_create(context, gw_db)
# TODO(salvatore-orlando): Leverage unique constraint instead
# of performing another query!
if self._retrieve_gateway_connections(context,
network_gateway_id,
network_mapping_info):
raise GatewayConnectionInUse(mapping=network_mapping_info,
gateway_id=network_gateway_id)
# TODO(salvatore-orlando): Creating a port will give it an IP,
# but we actually do not need any. Instead of wasting an IP we
# should have a way to say a port shall not be associated with
# any subnet
try:
# We pass the segmentation type and id too - the plugin
# might find them useful as the network connection object
# does not exist yet.
# NOTE: they're not extended attributes, rather extra data
# passed in the port structure to the plugin
# TODO(salvatore-orlando): Verify optimal solution for
# ownership of the gateway port
port = self.create_port(context, {
'port':
{'tenant_id': tenant_id,
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'fixed_ips': [],
'device_id': network_gateway_id,
'device_owner': DEVICE_OWNER_NET_GW_INTF,
'name': '',
'gw:segmentation_type':
network_mapping_info.get('segmentation_type'),
'gw:segmentation_id':
network_mapping_info.get('segmentation_id')}})
except exceptions.NetworkNotFound:
err_msg = (_("Requested network '%(network_id)s' not found."
"Unable to create network connection on "
"gateway '%(network_gateway_id)s") %
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
LOG.error(err_msg)
raise exceptions.InvalidInput(error_message=err_msg)
port_id = port['id']
LOG.debug(_("Gateway port for '%(network_gateway_id)s' "
"created on network '%(network_id)s':%(port_id)s"),
{'network_gateway_id': network_gateway_id,
'network_id': network_id,
'port_id': port_id})
# Create NetworkConnection record
network_mapping_info['port_id'] = port_id
network_mapping_info['tenant_id'] = tenant_id
gw_db.network_connections.append(
NetworkConnection(**network_mapping_info))
port_id = port['id']
# now deallocate and recycle ip from the port
for fixed_ip in port.get('fixed_ips', []):
self._delete_ip_allocation(context, network_id,
fixed_ip['subnet_id'],
fixed_ip['ip_address'])
LOG.debug(_("Ensured no Ip addresses are configured on port %s"),
port_id)
return {'connection_info':
{'network_gateway_id': network_gateway_id,
'network_id': network_id,
'port_id': port_id}}
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info)
LOG.debug(_("Disconnecting network '%(network_id)s' from gateway "
"'%(network_gateway_id)s'"),
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True):
# Uniquely identify connection, otherwise raise
try:
net_connection = self._retrieve_gateway_connections(
context, network_gateway_id,
network_mapping_info, only_one=True)
except sa_orm_exc.NoResultFound:
raise GatewayConnectionNotFound(
network_mapping_info=network_mapping_info,
network_gateway_id=network_gateway_id)
except sa_orm_exc.MultipleResultsFound:
raise MultipleGatewayConnections(
gateway_id=network_gateway_id)
# Remove gateway port from network
# FIXME(salvatore-orlando): Ensure state of port in NVP is
# consistent with outcome of transaction
self.delete_port(context, net_connection['port_id'],
nw_gw_port_check=False)
# Remove NetworkConnection record
context.session.delete(net_connection)
def _make_gateway_device_dict(self, gateway_device, fields=None,
include_nsx_id=False):
res = {'id': gateway_device['id'],
'name': gateway_device['name'],
'status': gateway_device['status'],
'connector_type': gateway_device['connector_type'],
'connector_ip': gateway_device['connector_ip'],
'tenant_id': gateway_device['tenant_id']}
if include_nsx_id:
# Return the NSX mapping as well. This attribute will not be
# returned in the API response anyway. Ensure it will not be
# filtered out in field selection.
if fields:
fields.append('nsx_id')
res['nsx_id'] = gateway_device['nsx_id']
return self._fields(res, fields)
def _get_gateway_device(self, context, device_id):
try:
return self._get_by_id(context, NetworkGatewayDevice, device_id)
except sa_orm_exc.NoResultFound:
raise GatewayDeviceNotFound(device_id=device_id)
def _is_device_in_use(self, context, device_id):
query = self._get_collection_query(
context, NetworkGatewayDeviceReference, {'id': [device_id]})
return query.first()
def get_gateway_device(self, context, device_id, fields=None,
include_nsx_id=False):
return self._make_gateway_device_dict(
self._get_gateway_device(context, device_id),
fields, include_nsx_id)
def get_gateway_devices(self, context, filters=None, fields=None,
include_nsx_id=False):
query = self._get_collection_query(context,
NetworkGatewayDevice,
filters=filters)
return [self._make_gateway_device_dict(row, fields, include_nsx_id)
for row in query]
def create_gateway_device(self, context, gateway_device,
initial_status=STATUS_UNKNOWN):
device_data = gateway_device[self.device_resource]
tenant_id = self._get_tenant_id_for_create(context, device_data)
with context.session.begin(subtransactions=True):
device_db = NetworkGatewayDevice(
id=device_data.get('id', uuidutils.generate_uuid()),
tenant_id=tenant_id,
name=device_data.get('name'),
connector_type=device_data['connector_type'],
connector_ip=device_data['connector_ip'],
status=initial_status)
context.session.add(device_db)
LOG.debug(_("Created network gateway device: %s"), device_db['id'])
return self._make_gateway_device_dict(device_db)
def update_gateway_device(self, context, gateway_device_id,
gateway_device, include_nsx_id=False):
device_data = gateway_device[self.device_resource]
with context.session.begin(subtransactions=True):
device_db = self._get_gateway_device(context, gateway_device_id)
# Ensure there is something to update before doing it
if any([device_db[k] != device_data[k] for k in device_data]):
device_db.update(device_data)
LOG.debug(_("Updated network gateway device: %s"),
gateway_device_id)
return self._make_gateway_device_dict(
device_db, include_nsx_id=include_nsx_id)
def delete_gateway_device(self, context, device_id):
with context.session.begin(subtransactions=True):
# A gateway device should not be deleted
# if it is used in any network gateway service
if self._is_device_in_use(context, device_id):
raise GatewayDeviceInUse(device_id=device_id)
device_db = self._get_gateway_device(context, device_id)
context.session.delete(device_db)
LOG.debug(_("Deleted network gateway device: %s."), device_id)
| {
"content_hash": "45cd22b40cb18771d28b92ac4ccea79f",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 79,
"avg_line_length": 47.9531914893617,
"alnum_prop": 0.5839914810542195,
"repo_name": "yamahata/neutron",
"id": "6a1c3c21b3c5d6b3d17fd0190ad019b3d80e16ad",
"size": "23165",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/plugins/vmware/dbexts/networkgw_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8778164"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import unittest
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def test_magicmock_del(self):
mock = MagicMock()
# before using getitem
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
mock = MagicMock()
# this time use it first
mock['foo']
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
def test_magic_method_wrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertIsNot(mock.__getitem__, f)
self.assertEqual(mock['foo'], (mock, 'fish'))
self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
mock.__getitem__ = mock
self.assertIs(mock.__getitem__, mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
def test_dict_methods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def test_numeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def test_division(self):
original = mock = Mock()
mock.value = 32
self.assertRaises(TypeError, lambda: mock / 2)
def truediv(self, other):
mock.value /= other
return self
mock.__truediv__ = truediv
self.assertEqual(mock / 2, mock)
self.assertEqual(mock.value, 16)
del mock.__truediv__
def itruediv(mock):
mock /= 4
self.assertRaises(TypeError, itruediv, mock)
mock.__itruediv__ = truediv
mock /= 8
self.assertEqual(mock, original)
self.assertEqual(mock.value, 2)
self.assertRaises(TypeError, lambda: 8 / mock)
mock.__rtruediv__ = truediv
self.assertEqual(0.5 / mock, mock)
self.assertEqual(mock.value, 4)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
m = Mock()
self.assertTrue(bool(m))
m.__bool__ = lambda s: False
self.assertFalse(bool(m))
def test_comparison(self):
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
self. assertTrue(mock < 3)
self. assertTrue(mock > 3)
self. assertTrue(mock <= 3)
self. assertTrue(mock >= 3)
self.assertRaises(TypeError, lambda: MagicMock() < object())
self.assertRaises(TypeError, lambda: object() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > object())
self.assertRaises(TypeError, lambda: object() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= object())
self.assertRaises(TypeError, lambda: object() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= object())
self.assertRaises(TypeError, lambda: object() >= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
def test_equality(self):
for mock in Mock(), MagicMock():
self.assertEqual(mock == mock, True)
self.assertIsInstance(mock == mock, bool)
self.assertEqual(mock != mock, False)
self.assertIsInstance(mock != mock, bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
mock = MagicMock()
mock.__eq__.return_value = True
self.assertIsInstance(mock == 3, bool)
self.assertEqual(mock == 3, True)
mock.__ne__.return_value = False
self.assertIsInstance(mock != 3, bool)
self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertIn(3, mock)
self.assertNotIn(6, mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def test_magicmock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
getattr(mock, '__bool__').return_value = False
self.assertFalse(hasattr(mock, '__nonzero__'))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginery__'))
def test_magic_mock_equality(self):
mock = MagicMock()
self.assertIsInstance(mock == object(), bool)
self.assertIsInstance(mock != object(), bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
def test_magicmock_defaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertTrue(bool(mock))
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
def test_magic_methods_and_spec(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_magic_methods_and_spec_set(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegex(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def test_attributes_and_return_value(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
def test_magic_methods_are_magic_mocks(self):
mock = MagicMock()
self.assertIsInstance(mock.__getitem__, MagicMock)
mock[1][2].__getitem__.return_value = 3
self.assertEqual(mock[1][2][3], 3)
def test_magic_method_reset_mock(self):
mock = MagicMock()
str(mock)
self.assertTrue(mock.__str__.called)
mock.reset_mock()
self.assertFalse(mock.__str__.called)
def test_dir(self):
# overriding the default implementation
for mock in Mock(), MagicMock():
def _dir(self):
return ['foo']
mock.__dir__ = _dir
self.assertEqual(dir(mock), ['foo'])
@unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
def test_bound_methods(self):
m = Mock()
# XXXX should this be an expected failure instead?
# this seems like it should work, but is hard to do without introducing
# other api inconsistencies. Failure message could be better though.
m.__iter__ = [3].__iter__
self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
def test_iterable_as_iter_return_value(self):
m = MagicMock()
m.__iter__.return_value = [1, 2, 3]
self.assertEqual(list(m), [1, 2, 3])
self.assertEqual(list(m), [1, 2, 3])
m.__iter__.return_value = iter([4, 5, 6])
self.assertEqual(list(m), [4, 5, 6])
self.assertEqual(list(m), [])
def test_matmul(self):
m = MagicMock()
self.assertIsInstance(m @ 1, MagicMock)
m.__matmul__.return_value = 42
m.__rmatmul__.return_value = 666
m.__imatmul__.return_value = 24
self.assertEqual(m @ 1, 42)
self.assertEqual(1 @ m, 666)
m @= 24
self.assertEqual(m, 24)
def test_divmod_and_rdivmod(self):
m = MagicMock()
self.assertIsInstance(divmod(5, m), MagicMock)
m.__divmod__.return_value = (2, 1)
self.assertEqual(divmod(m, 2), (2, 1))
m = MagicMock()
foo = divmod(2, m)
self.assertIsInstance(foo, MagicMock)
foo_direct = m.__divmod__(2)
self.assertIsInstance(foo_direct, MagicMock)
bar = divmod(m, 2)
self.assertIsInstance(bar, MagicMock)
bar_direct = m.__rdivmod__(2)
self.assertIsInstance(bar_direct, MagicMock)
# http://bugs.python.org/issue23310
# Check if you can change behaviour of magic methds in MagicMock init
def test_magic_in_initialization(self):
m = MagicMock(**{'__str__.return_value': "12"})
self.assertEqual(str(m), "12")
def test_changing_magic_set_in_initialization(self):
m = MagicMock(**{'__str__.return_value': "12"})
m.__str__.return_value = "13"
self.assertEqual(str(m), "13")
m = MagicMock(**{'__str__.return_value': "12"})
m.configure_mock(**{'__str__.return_value': "14"})
self.assertEqual(str(m), "14")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2d760257e3a3bca036b9dd23f3ce1062",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 79,
"avg_line_length": 30.792735042735043,
"alnum_prop": 0.5627645548539311,
"repo_name": "mindbender-studio/setup",
"id": "24569b532ded9d014c96c9e4c46ba49dbfdc6773",
"size": "14411",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "bin/windows/python36/Lib/unittest/test/testmock/testmagicmethods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3672"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "JavaScript",
"bytes": "13629"
},
{
"name": "PowerShell",
"bytes": "1447"
},
{
"name": "Python",
"bytes": "14071127"
},
{
"name": "QML",
"bytes": "2133450"
},
{
"name": "Shell",
"bytes": "3997"
},
{
"name": "Standard ML",
"bytes": "478"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# action_derivedsourceslist.py generates a single cpp file that includes
# all v8 bindings cpp files generated from idls. Files can be assigned into
# multiple output files, to reduce maximum compilation unit size and allow
# parallel compilation.
#
# usage: action_derivedsourceslist.py IDL_FILES_LIST -- OUTPUT_FILE1 OUTPUT_FILE2 ...
#
# Note that IDL_FILES_LIST is a text file containing the IDL file paths.
import errno
import os
import os.path
import re
import subprocess
import sys
# A regexp for finding Conditional attributes in interface definitions.
conditionalPattern = re.compile('interface[\s]*\[[^\]]*Conditional=([\_0-9a-zA-Z&|]*)')
copyrightTemplate = """
"""
# Wraps conditional with ENABLE() and replace '&','|' with '&&','||' if more than one conditional is specified.
def formatConditional(conditional):
def wrapWithEnable(s):
if re.match('[|&]$', s):
return s * 2
return 'ENABLE(' + s + ')'
return ' '.join(map(wrapWithEnable, conditional))
# Find the conditional interface attribute.
def extractConditional(idlFilePath):
conditional = None
# Read file and look for "interface [ Conditional=XXX ]".
idlFile = open(idlFilePath)
idlContents = idlFile.read().replace('\n', '')
idlFile.close()
match = conditionalPattern.search(idlContents)
if match:
conditional = match.group(1)
conditional = re.split('([|&])', conditional)
return conditional
# Extracts conditional and interface name from each IDL file.
def extractMetaData(filePaths):
metaDataList = []
for f in filePaths:
metaData = {}
if len(f) == 0:
continue
if not os.path.exists(f):
print 'WARNING: file not found: "%s"' % f
continue
# Extract type name from file name
(parentPath, fileName) = os.path.split(f)
(interfaceName, ext) = os.path.splitext(fileName)
if not ext == '.idl':
continue
metaData = {
'conditional': extractConditional(f),
'name': interfaceName,
}
metaDataList.append(metaData)
return metaDataList
def generateContent(filesMetaData, partition, totalPartitions):
# Sort files by conditionals.
filesMetaData.sort()
output = []
# Add fixed content.
output.append(copyrightTemplate)
output.append('#define NO_IMPLICIT_ATOMICSTRING\n\n')
# List all includes segmented by if and endif.
prevConditional = None
for metaData in filesMetaData:
name = metaData['name']
if (hash(name) % totalPartitions) != partition:
continue
conditional = metaData['conditional']
if prevConditional and prevConditional != conditional:
output.append('#endif\n')
if conditional and prevConditional != conditional:
output.append('\n#if %s\n' % formatConditional(conditional))
output.append('#include "bindings/V8%s.cpp"\n' % name)
prevConditional = conditional
if prevConditional:
output.append('#endif\n')
return ''.join(output)
def writeContent(content, outputFileName):
(parentPath, fileName) = os.path.split(outputFileName)
if not os.path.exists(parentPath):
print parentPath
os.mkdir(parentPath)
f = open(outputFileName, 'w')
f.write(content)
f.close()
def main(args):
assert(len(args) > 3)
inOutBreakIndex = args.index('--')
inputFileName = args[1]
outputFileNames = args[inOutBreakIndex+1:]
inputFile = open(inputFileName, 'r')
idlFileNames = inputFile.read().split('\n')
inputFile.close()
filesMetaData = extractMetaData(idlFileNames)
for fileName in outputFileNames:
print 'Generating derived sources list into %s...' % fileName
partition = outputFileNames.index(fileName)
fileContents = generateContent(filesMetaData, partition, len(outputFileNames))
writeContent(fileContents, fileName)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "dec26a27ddc3345d351a26a4d1080e5d",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 111,
"avg_line_length": 32.85795454545455,
"alnum_prop": 0.6913366764655023,
"repo_name": "Xperia-Nicki/android_platform_sony_nicki",
"id": "c9c5bf5c31859729bdca54ecebc927125dcda932",
"size": "7256",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "external/webkit/Source/WebCore/WebCore.gyp/scripts/action_derivedsourcesallinone.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "212775"
},
{
"name": "Awk",
"bytes": "19252"
},
{
"name": "C",
"bytes": "68667466"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "54670920"
},
{
"name": "CLIPS",
"bytes": "12224"
},
{
"name": "CSS",
"bytes": "283405"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Java",
"bytes": "4882"
},
{
"name": "JavaScript",
"bytes": "19597804"
},
{
"name": "Objective-C",
"bytes": "5849156"
},
{
"name": "PHP",
"bytes": "17224"
},
{
"name": "Pascal",
"bytes": "42411"
},
{
"name": "Perl",
"bytes": "1632149"
},
{
"name": "Prolog",
"bytes": "214621"
},
{
"name": "Python",
"bytes": "3493321"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Ruby",
"bytes": "78743"
},
{
"name": "Scilab",
"bytes": "554"
},
{
"name": "Shell",
"bytes": "265637"
},
{
"name": "TypeScript",
"bytes": "45459"
},
{
"name": "XSLT",
"bytes": "11219"
}
],
"symlink_target": ""
} |
DEBUG = False
USE_EMBEDDED_PYSERIAL = True
BAUD = 115200
if USE_EMBEDDED_PYSERIAL:
from os import sys, path
thisdir = path.dirname(path.abspath(__file__))
sys.path.append(thisdir)
import serial
#----- PORTSCAN ----------------------------------------------------------------
# This will check for the portscan.cache file with a remembered serial port
# identifier. If that exists, it will just use the serial port identified
# in that file. If the file does not exist, it performs a workflow that
# prompts the user. Note that if you plug your device into a different
# USB port or use a different USB hub configuration, this may renumber
# the port and require a re-scan. Just delete the portscan.cache file
# and re-run your app code, if that happens.
import portscan
name = portscan.getName()
if name != None:
if DEBUG:
print("Using port:" + name)
PORT = name
else:
name = portscan.find()
if name == None:
raise ValueError("No port selected, giving in")
PORT = name
print("Your device has been detected")
print("Now running your program...")
#----- CONFIGURE SERIAL PORT ---------------------------------------------------
s = serial.Serial(PORT)
s.baudrate = BAUD
s.parity = serial.PARITY_NONE
s.databits = serial.EIGHTBITS
s.stopbits = serial.STOPBITS_ONE
s.timeout = 0 # non blocking mode
s.close()
s.port = PORT
s.open()
#----- SERIAL PORT READ AND WRITE ENGINE --------------------------------------
line_buffer = ""
rec_buffer = None
def read_waiting():
"""Poll the serial and fill up rec_buffer if something comes in"""
global rec_buffer
if rec_buffer != None:
return True
line = process_serial()
if line != None:
rec_buffer = line
return True
return False
def read():
"""Poll the rec_buffer and remove next line from it if there is a line in it"""
global rec_buffer
if not read_waiting():
return None
rec = rec_buffer
rec_buffer = None
##print("read:" + rec)
return rec
def process_serial():
"""Low level serial poll function"""
global line_buffer
while True:
data = s.read(1)
if len(data) == 0:
return None # no new data has been received
data = data[0]
if data == '\n':
pass # strip newline
elif data[0] == '\r':
line = line_buffer
line_buffer = ""
##print(line)
return line
else:
line_buffer += data
#----- ADAPTOR ----------------------------------------------------------------
# This is here, so you can change the concurrency and blocking model,
# independently of the underlying code, to adapt to how your app wants
# to interact with the serial port.
# NOTE: This is configured for non blocking send and receive, but no threading
# and no callback handling.
def send_message(msg):
"""Send a message to the device.
It is the callers responsibility to add newlines if you want them.
"""
##print("Sending:%s" % msg)
s.write(msg)
def get_next_message():
"""Receive a single line of text from the device.
Newline characters are pre-stripped from the end.
If there is not a complete line waiting, returns None.
Call this regularly to 'pump' the receive engine.
"""
result = read()
##if result != None:
## print("get_next_message:%s" % str(result))
return result
# END
| {
"content_hash": "ac8911c99c0cb165f4f98dd73da71e3b",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 83,
"avg_line_length": 25.394160583941606,
"alnum_prop": 0.5972980741592412,
"repo_name": "whaleygeek/mb_sdcard",
"id": "5c59d2bf8c070d48ab5b3d3d35c2bd184cfd87f8",
"size": "4223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/microbit/auto_serial/auto_serial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "282475"
}
],
"symlink_target": ""
} |
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import sys
import threading
import time
import uuid
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import session as db_session
from oslo.db.sqlalchemy import utils as sqlalchemyutils
from oslo.utils import excutils
from oslo.utils import timeutils
import six
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import quota
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
LOG = logging.getLogger(__name__)
_ENGINE_FACADE = None
_LOCK = threading.Lock()
def _create_facade_lazily():
global _LOCK, _ENGINE_FACADE
if _ENGINE_FACADE is None:
with _LOCK:
if _ENGINE_FACADE is None:
_ENGINE_FACADE = db_session.EngineFacade.from_config(CONF)
return _ENGINE_FACADE
def get_engine(use_slave=False):
facade = _create_facade_lazily()
return facade.get_engine(use_slave=use_slave)
def get_session(use_slave=False, **kwargs):
facade = _create_facade_lazily()
return facade.get_session(use_slave=use_slave, **kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning(_LW("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def model_query(context, model,
args=None,
session=None,
use_slave=False,
read_deleted=None,
project_only=False):
"""Query helper that accounts for context's `read_deleted` field.
:param context: NovaContext of the query.
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
:param session: If present, the session to use.
:param use_slave: If true, use a slave connection to the DB if creating a
session.
:param read_deleted: If not None, overrides context's read_deleted field.
Permitted values are 'no', which does not return
deleted values; 'only', which only returns deleted
values; and 'yes', which does not filter deleted
values.
:param project_only: If set and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
"""
if session is None:
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if read_deleted is None:
read_deleted = context.read_deleted
query_kwargs = {}
if 'no' == read_deleted:
query_kwargs['deleted'] = False
elif 'only' == read_deleted:
query_kwargs['deleted'] = True
elif 'yes' == read_deleted:
pass
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
query = sqlalchemyutils.model_query(model, session, args, **query_kwargs)
# We can't use oslo.db model_query's project_id here, as it doesn't allow
# us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(model.project_id == context.project_id,
model.project_id == null()))
else:
query = query.filter_by(project_id=context.project_id)
return query
def convert_objects_related_datetimes(values, *datetime_keys):
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
values[key] = timeutils.parse_strtime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
def _sync_instances(context, project_id, user_id, session):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(
context, project_id, user_id, session)))
def _sync_floating_ips(context, project_id, user_id, session):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id, session))
def _sync_fixed_ips(context, project_id, user_id, session):
return dict(fixed_ips=_fixed_ip_count_by_project(
context, project_id, session))
def _sync_security_groups(context, project_id, user_id, session):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id, session))
def _sync_server_groups(context, project_id, user_id, session):
return dict(server_groups=_instance_group_count_by_project_and_user(
context, project_id, user_id, session))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
'_sync_server_groups': _sync_server_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.iteritems():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
# method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
count = model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.ServiceNotFound(service_id=service_id)
model_query(context, models.ComputeNode, session=session).\
filter_by(service_id=service_id).\
soft_delete(synchronize_session=False)
def _service_get(context, service_id, with_compute_node=True, session=None,
use_slave=False):
query = model_query(context, models.Service, session=session,
use_slave=use_slave).\
filter_by(id=service_id)
if with_compute_node:
query = query.options(joinedload('compute_node'))
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id, with_compute_node=False,
use_slave=False):
return _service_get(context, service_id,
with_compute_node=with_compute_node,
use_slave=use_slave)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def service_get_by_compute_host(context, host, use_slave=False):
result = model_query(context, models.Service, read_deleted="no",
use_slave=use_slave).\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@require_admin_context
@_retry_on_deadlock
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id,
with_compute_node=False, session=session)
service_ref.update(values)
return service_ref
###################
def compute_node_get(context, compute_id):
return _compute_node_get(context, compute_id)
def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
options(joinedload('service')).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_nodes_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
all()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def compute_node_get_by_host_and_nodename(context, host, nodename):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(host=host, hypervisor_hostname=nodename).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def compute_node_get_all_by_host(context, host, use_slave=False):
result = model_query(context, models.ComputeNode, read_deleted='no',
use_slave=use_slave).\
filter_by(host=host).\
all()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def compute_node_get_all(context, no_date_fields):
# NOTE(msdubov): Using lower-level 'select' queries and joining the tables
# manually here allows to gain 3x speed-up and to have 5x
# less network load / memory usage compared to the sqla ORM.
engine = get_engine()
# Retrieve ComputeNode, Service
compute_node = models.ComputeNode.__table__
service = models.Service.__table__
with engine.begin() as conn:
redundant_columns = set(['deleted_at', 'created_at', 'updated_at',
'deleted']) if no_date_fields else set([])
def filter_columns(table):
return [c for c in table.c if c.name not in redundant_columns]
compute_node_query = sql.select(filter_columns(compute_node)).\
where(compute_node.c.deleted == 0).\
order_by(compute_node.c.service_id)
compute_node_rows = conn.execute(compute_node_query).fetchall()
service_query = sql.select(filter_columns(service)).\
where((service.c.deleted == 0) &
(service.c.binary == 'nova-compute')).\
order_by(service.c.id)
service_rows = conn.execute(service_query).fetchall()
# Join ComputeNode & Service manually.
services = {}
for proxy in service_rows:
services[proxy['id']] = dict(proxy.items())
compute_nodes = []
for proxy in compute_node_rows:
node = dict(proxy.items())
node['service'] = services.get(proxy['service_id'])
compute_nodes.append(node)
return compute_nodes
@require_admin_context
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
options(joinedload('service')).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@require_admin_context
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
@require_admin_context
@_retry_on_deadlock
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
session = get_session()
with session.begin():
compute_ref = _compute_node_get(context, compute_id, session=session)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_ref.update(values)
return compute_ref
@require_admin_context
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
session = get_session()
with session.begin():
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
result = model_query(context,
models.ComputeNode, (
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
), read_deleted="no").\
filter(models.Service.disabled == false()).\
filter(
models.Service.id ==
models.ComputeNode.service_id).\
first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return {field: int(result[idx] or 0)
for idx, field in enumerate(fields)}
###################
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except db_exc.DBError:
msg = _("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
return result
@require_context
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp,
(models.FloatingIp.pool,)).distinct():
pools.append({'name': result[0]})
return pools
@require_context
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
floating_ip_ref['auto_assigned'] = auto_assigned
session.add(floating_ip_ref)
return floating_ip_ref['address']
@require_context
def floating_ip_bulk_create(context, ips, want_result=True):
session = get_session()
with session.begin():
try:
tab = models.FloatingIp().__table__
session.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FloatingIpExists(address=e.value)
if want_result:
return model_query(
context, models.FloatingIp, session=session).filter(
models.FloatingIp.address.in_(
[ip['address'] for ip in ips])).all()
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
@require_context
def floating_ip_bulk_destroy(context, ips):
session = get_session()
with session.begin():
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
rows = query.all()
for row in rows:
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
for project_id, count in project_id_to_quota_count.iteritems():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context,
reservations,
project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@_retry_on_deadlock
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
floating_ip_ref = _floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
return fixed_ip_ref
@require_context
@_retry_on_deadlock
def floating_ip_deallocate(context, address):
session = get_session()
with session.begin():
return model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
filter(models.FloatingIp.project_id != null()).\
update({'project_id': None,
'host': None,
'auto_assigned': False},
synchronize_session=False)
@require_context
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = model_query(context,
models.FloatingIp,
session=session).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
def _floating_ip_get_all(context, session=None):
return model_query(context, models.FloatingIp, read_deleted="no",
session=session)
@require_admin_context
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address, session=None):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
float_ip_ref = _floating_ip_get_by_address(context, address, session)
float_ip_ref.update(values)
try:
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return float_ip_ref
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
def _dnsdomain_get_or_create(context, session, fqdomain):
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
session.add(domain_ref)
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
session.add(domain_ref)
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Keyword arguments:
reserved -- should be a boolean value(True or False), exact value will be
used to filter on the fixed ip address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
if not fixed_ip_ref.network_id:
fixed_ip_ref.network_id = network_id
fixed_ip_ref.instance_uuid = instance_uuid
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise exception.NoMoreFixedIps(net=network_id)
if fixed_ip_ref['network_id'] is None:
fixed_ip_ref['network'] = network_id
if instance_uuid:
fixed_ip_ref['instance_uuid'] = instance_uuid
if host:
fixed_ip_ref['host'] = host
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
engine = get_engine()
with engine.begin() as conn:
try:
tab = models.FixedIp.__table__
conn.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FixedIpExists(address=e.value)
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None,
'virtual_interface_id': None})
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no", session=session).\
filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, session=None,
columns_to_join=None):
if session is None:
session = get_session()
if columns_to_join is None:
columns_to_join = []
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
outerjoin(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
outerjoin(models.VirtualInterface, vif_and).\
options(contains_eager("virtual_interface")).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
order_by(asc(models.VirtualInterface.created_at),
asc(models.VirtualInterface.id)).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@require_admin_context
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no", session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None, use_slave=False):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no", use_slave=use_slave)
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, session, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
convert_objects_related_datetimes(values, *datetime_keys)
def _check_instance_exists(context, session, instance_uuid):
if not model_query(context, models.Instance, session=session,
read_deleted="no").first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
# NOTE(rpodolyaka): create the default security group, if it doesn't exist.
# This must be done in a separate transaction, so that this one is not
# aborted in case a concurrent one succeeds first and the unique constraint
# for security group names is violated by a concurrent INSERT
security_group_ensure_default(context)
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref['extra'] = models.InstanceExtra()
instance_ref['extra'].update(
{'numa_topology': None,
'pci_requests': None,
})
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = _security_group_ensure_default(context, session)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id, session=None):
result = model_query(context,
models.Instance, (
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
), session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@_retry_on_deadlock
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance, session=session).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceExtra, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join, use_slave=use_slave)
def _instance_get_by_uuid(context, uuid, session=None,
columns_to_join=None, use_slave=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
use_slave=use_slave).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warn(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None,
columns_to_join=None, use_slave=False):
query = model_query(context, models.Instance, session=session,
project_only=True, use_slave=use_slave).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances,
manual_joins=None, use_slave=False):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids,
use_slave=use_slave):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
use_slave=use_slave):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst.iteritems())
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
"""Separate manually joined columns from columns_to_join
If columns_to_join contains 'metadata', 'system_metadata', or
'pci_devices' those columns are removed from columns_to_join and added
to a manual_joins list to be used with the _instances_fill_metadata method.
The columns_to_join formal parameter is copied and not modified, the return
tuple has the modified columns_to_join list to be used with joinedload in
a model query.
:param:columns_to_join: List of columns to join in a model query.
:return: tuple of (manual_joins, columns_to_join)
"""
manual_joins = []
columns_to_join_new = copy.copy(columns_to_join)
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join_new:
columns_to_join_new.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join_new
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
query = query.options(joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
use_slave=False):
"""Return instances matching all filters sorted by the primary key.
See instance_get_all_by_filters_sort for more information.
"""
# Invoke the API with the multiple sort keys and directions using the
# single sort key/direction
return instance_get_all_by_filters_sort(context, filters, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
use_slave=use_slave,
sort_keys=[sort_key],
sort_dirs=[sort_dir])
@require_context
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
columns_to_join=None, use_slave=False,
sort_keys=None, sort_dirs=None):
"""Return instances that match all filters sorted the the given keys.
Deleted instances will be returned by default, unless there's a filter that
says otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
matching. Exact matching is applied for the following filters::
| ['project_id', 'user_id', 'image_ref',
| 'vm_state', 'instance_type_id', 'uuid',
| 'metadata', 'host', 'system_metadata']
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
key named 'filter'::
| filters = {
| 'filter': [
| {'name': 'tag-key', 'value': '<metakey>'},
| {'name': 'tag-value', 'value': '<metaval>'},
| {'name': 'tag:<metakey>', 'value': '<metaval>'}
| ]
| }
Special keys are used to tweek the query further::
| 'changes-since' - only return instances updated after
| 'deleted' - only return (or exclude) deleted instances
| 'soft_deleted' - modify behavior of 'deleted' to either
| include or exclude instances whose
| vm_state is SOFT_DELETED.
A fourth type of filter (also using exact matching), filters
based on instance tags (not metadata tags). There are two types
of these tags:
`tag` -- One or more strings that will be used to filter results
in an AND expression.
`tag-any` -- One or more strings that will be used to filter results in
an OR expression.
Tags should be represented as list::
| filters = {
| 'tag': [some-tag, some-another-tag],
| 'tag-any: [some-any-tag, some-another-any-tag]
| }
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
if limit == 0:
return []
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query_prefix = session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column))
else:
query_prefix = query_prefix.options(joinedload(column))
# Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
# no need to do it here as well
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
if filters.pop('soft_deleted', True):
deleted = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(deleted)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if 'tag' in filters:
tags = filters.pop('tag')
# We build a JOIN ladder expression for each tag, JOIN'ing
# the first tag to the instances table, and each subsequent
# tag to the last JOIN'd tags table
first_tag = tags.pop(0)
query_prefix = query_prefix.join(models.Instance.tags)
query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias,
models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tag-any' in filters:
tags = filters.pop('tag-any')
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = _exact_instance_filter(query_prefix,
filters, exact_match_filter_names)
query_prefix = _regex_instance_filter(query_prefix, filters)
query_prefix = _tag_instance_filter(context, query_prefix, filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
try:
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
sort_keys,
marker=marker,
sort_dirs=sort_dirs)
except db_exc.InvalidSortKey:
raise exception.InvalidSortKey()
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
def _tag_instance_filter(context, query, filters):
"""Applies tag filtering to an Instance query.
Returns the updated query. This method alters filters to remove
keys that are tags. This filters on resources by tags - this
method assumes that the caller will take care of access control
:param context: request context object
:param query: query to apply filters to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
model = models.Instance
model_metadata = models.InstanceMetadata
model_uuid = model_metadata.instance_uuid
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_metadata, (model_uuid,),
session=query.session).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_metadata, (model_uuid,),
session=query.session).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
def _regex_instance_filter(query, filters):
"""Applies regular expression filtering to an Instance query.
Returns the updated query.
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
"""
model = models.Instance
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
db_string = CONF.database.connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
'%' + str(filters[filter_name]) + '%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
str(filters[filter_name])))
return query
def _exact_instance_filter(query, filters, legal_keys):
"""Applies exact match filtering to an Instance query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
model = models.Instance
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
def process_sort_params(sort_keys, sort_dirs,
default_keys=['created_at', 'id'],
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs) != 0:
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
# Unless more direction are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction size exceeds sort key size")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
use_slave=False,
columns_to_join=None):
"""Return instances and joins that were active during window."""
session = get_session(use_slave=use_slave)
query = session.query(models.Instance)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
for column in columns_to_join_new:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
query = query.filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all(), manual_joins)
def _instance_get_all_query(context, project_only=False,
joins=None, use_slave=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
use_slave=use_slave)
for column in joins:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
return query
@require_admin_context
def instance_get_all_by_host(context, host,
columns_to_join=None,
use_slave=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
use_slave=use_slave).filter_by(host=host).all(),
manual_joins=columns_to_join,
use_slave=use_slave)
def _instance_get_all_uuids_by_host(context, host, session=None):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects. This internal version
allows you to specify a session object as a kwarg.
"""
uuids = []
for tuple in model_query(context, models.Instance, (models.Instance.uuid,),
read_deleted="no", session=session).\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
@require_admin_context
def instance_get_all_by_host_and_node(context, host, node,
columns_to_join=None):
if columns_to_join is None:
manual_joins = []
else:
candidates = ['system_metadata', 'metadata']
manual_joins = filter(lambda x: x in candidates,
columns_to_join)
columns_to_join = list(set(columns_to_join) - set(candidates))
return _instances_fill_metadata(context,
_instance_get_all_query(
context,
joins=columns_to_join).filter_by(host=host).
filter_by(node=node).all(), manual_joins=manual_joins)
@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
@require_context
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
floating_ips = model_query(context,
models.FloatingIp,
(models.FloatingIp.address,)).\
join(models.FloatingIp.fixed_ip).\
filter_by(instance_uuid=instance_uuid)
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@require_context
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
a UnexpectedTaskStateError is thrown.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return _instance_update(context, instance_uuid, values,
copy_old_instance=True,
columns_to_join=columns_to_join)
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata, session):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
elif key not in metadata:
to_delete.append(keyvalue)
for condemned in to_delete:
condemned.soft_delete(session=session)
for key, value in metadata.iteritems():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
instance[metadata_type].append(newitem)
@_retry_on_deadlock
def _instance_update(context, instance_uuid, values, copy_old_instance=False,
columns_to_join=None):
session = get_session()
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session,
columns_to_join=columns_to_join)
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["task_state"]
if actual_state not in expected:
if actual_state == task_states.DELETING:
raise exception.UnexpectedDeletingTaskStateError(
actual=actual_state, expected=expected)
else:
raise exception.UnexpectedTaskStateError(
actual=actual_state, expected=expected)
if "expected_vm_state" in values:
expected = values.pop("expected_vm_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["vm_state"]
if actual_state not in expected:
raise exception.UnexpectedVMStateError(actual=actual_state,
expected=expected)
instance_hostname = instance_ref['hostname'] or ''
if ("hostname" in values and
values["hostname"].lower() != instance_hostname.lower()):
_validate_unique_server_name(context,
session,
values['hostname'])
if copy_old_instance:
old_instance_ref = copy.copy(instance_ref)
else:
old_instance_ref = None
metadata = values.get('metadata')
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
values.pop('metadata'),
session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
values.pop('system_metadata'),
session)
_handle_objects_related_type_conversions(values)
instance_ref.update(values)
session.add(instance_ref)
return (old_instance_ref, instance_ref)
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
:param session: = optional session object
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
:param session: = optional session object
"""
session = get_session()
with session.begin():
info_cache = model_query(context, models.InstanceInfoCache,
session=session).\
filter_by(instance_uuid=instance_uuid).\
first()
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
info_cache = models.InstanceInfoCache()
values['instance_uuid'] = instance_uuid
try:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
:param session: = optional session object
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
def _instance_extra_create(context, values):
inst_extra_ref = models.InstanceExtra()
inst_extra_ref.update(values)
inst_extra_ref.save()
return inst_extra_ref
def instance_extra_update_by_uuid(context, instance_uuid, values):
return model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
update(values)
def instance_extra_get_by_instance_uuid(context, instance_uuid,
columns=None):
query = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid)
if columns is None:
columns = ['numa_topology', 'pci_requests']
for column in columns:
query = query.options(undefer(column))
instance_extra = query.first()
return instance_extra
###################
@require_context
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
def key_pair_destroy(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
def key_pair_get(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
called by project_get_networks under certain conditions
and network manager add_network_to_project()
only associate if the project doesn't already have a network
or if force is True
force solves race condition where a fresh project has multiple instance
builds simultaneously picked up by multiple network hosts which attempt
to associate the project with multiple networks
force should only be used as a direct consequence of user request
all automated requests should not use force
"""
session = get_session()
with session.begin():
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@require_admin_context
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
result = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id,
session=session)
model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, session=None, project_only='allow_none'):
result = model_query(context, models.Network, session=session,
project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
# check if the result contains all the networks
# we are looking for
for network_uuid in network_uuids:
for network in result:
if network['uuid'] == network_uuid:
break
else:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
session = get_session()
# NOTE(vish): This subquery left joins the minimum interface id for each
# instance. If the join succeeds (i.e. the 11th column is not
# null), then the fixed ip is on the first interface.
subq = session.query(func.min(models.VirtualInterface.id).label("id"),
models.VirtualInterface.instance_uuid).\
group_by(models.VirtualInterface.instance_uuid).subquery()
subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
query = session.query(models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased,
subq.c.id).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
outerjoin((subq, subq_and)).\
filter(models.FixedIp.instance_uuid != null()).\
filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
# NOTE(vish): default_route is True if this fixed ip is on the first
# interface its instance.
cleaned['default_route'] = datum[10] is not None
data.append(cleaned)
return data
def network_in_use_on_host(context, network_id, host):
fixed_ips = network_get_associated_fixed_ips(context, network_id, host)
return len(fixed_ips) > 0
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_all_by_host(context, host):
session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
and_(models.FixedIp.instance_uuid != null(),
models.Instance.host == host))
fixed_ip_query = model_query(context, models.FixedIp,
(models.FixedIp.network_id,),
session=session).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
filter(host_filter).\
all()
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = _network_get_query(context, session=session).\
filter_by(id=network_id).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = _network_get(context, network_id, session=session)
network_ref.update(values)
try:
network_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get_all_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
user_quotas = model_query(context, models.ProjectUserQuota,
(models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit)).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@require_context
def quota_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_get_all(context, project_id):
nova.context.authorize_project_context(context, project_id)
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@require_admin_context
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save()
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
nova.context.authorize_project_context(context, project_id)
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null()))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
@require_context
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(project_id, user_id, resource, in_use,
reserved, until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
# updated_at is needed for judgement of max_age
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null())).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(uuid, usage, project_id, user_id, resource,
delta, expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_project_user_quota_usages(context, session, project_id,
user_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
proj_result = dict()
user_result = dict()
# Get the total count of in_use,reserved
for row in rows:
proj_result.setdefault(row.resource,
dict(in_use=0, reserved=0, total=0))
proj_result[row.resource]['in_use'] += row.in_use
proj_result[row.resource]['reserved'] += row.reserved
proj_result[row.resource]['total'] += (row.in_use + row.reserved)
if row.user_id is None or row.user_id == user_id:
user_result[row.resource] = row
return proj_result, user_result
def _create_quota_usage_if_missing(user_usages, resource, until_refresh,
project_id, user_id, session):
"""Creates a QuotaUsage record and adds to user_usages if not present.
:param user_usages: dict of resource keys to QuotaUsage records. This is
updated if resource is not in user_usages yet or
until_refresh is not None.
:param resource: The resource being checked for quota usage.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param max_age: Number of seconds between subsequent usage refreshes.
:param project_id: The project being checked for quota usage.
:param user_id: The user being checked for quota usage.
:param session: DB session holding a transaction lock.
:return: True if a new QuotaUsage record was created and added
to user_usages, False otherwise.
"""
new_usage = None
if resource not in user_usages:
user_id_to_use = user_id
if resource in PER_PROJECT_QUOTAS:
user_id_to_use = None
new_usage = _quota_usage_create(project_id, user_id_to_use, resource,
0, 0, until_refresh or None,
session=session)
user_usages[resource] = new_usage
return new_usage is not None
def _is_quota_refresh_needed(quota_usage, max_age):
"""Determines if a quota usage refresh is needed.
:param quota_usage: A QuotaUsage object for a given resource.
:param max_age: Number of seconds between subsequent usage refreshes.
:return: True if a refresh is needed, False otherwise.
"""
refresh = False
if quota_usage.in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif quota_usage.until_refresh is not None:
quota_usage.until_refresh -= 1
if quota_usage.until_refresh <= 0:
refresh = True
elif max_age and (timeutils.utcnow() -
quota_usage.updated_at).seconds >= max_age:
refresh = True
return refresh
def _refresh_quota_usages(quota_usage, until_refresh, in_use):
"""Refreshes quota usage for the given resource.
:param quota_usage: A QuotaUsage object for a given resource.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param in_use: Actual quota usage for the resource.
"""
if quota_usage.in_use != in_use:
LOG.info(_LI('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s'),
{'project_id': quota_usage.project_id,
'user_id': quota_usage.user_id,
'res': quota_usage.resource,
'tracked_use': quota_usage.in_use,
'in_use': in_use})
# Update the usage
quota_usage.in_use = in_use
quota_usage.until_refresh = until_refresh or None
def _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages):
"""Checks if any resources will go over quota based on the request.
:param project_quotas: dict of resource quotas (limits) for the project.
:param user_quotas: dict of resource quotas (limits) for the user.
:param deltas: dict of resource keys to positive/negative quota
changes for the resources in a given operation.
:param project_usages: dict of resource keys to QuotaUsage records for the
project.
:param user_usages: dict of resource keys to QuotaUsage records for the
user.
:return: list of resources that are over-quota for the
operation.
"""
overs = []
for res, delta in deltas.items():
# We can't go over-quota if we're not reserving anything or if
# we have unlimited quotas.
if user_quotas[res] >= 0 and delta >= 0:
# over if the project usage + delta is more than project quota
if project_quotas[res] < delta + project_usages[res]['total']:
overs.append(res)
# over if the user usage + delta is more than user quota
elif user_quotas[res] < delta + user_usages[res]['total']:
overs.append(res)
return overs
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
created = _create_quota_usage_if_missing(user_usages, resource,
until_refresh, project_id,
user_id, session)
refresh = created or _is_quota_refresh_needed(
user_usages[resource], max_age)
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
_create_quota_usage_if_missing(user_usages, res,
until_refresh, project_id,
user_id, session)
_refresh_quota_usages(user_usages[res], until_refresh,
in_use)
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
project_usages[key] = value
overs = _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages)
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
session.add(usage_ref)
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
usages = user_usages
usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
for k, v in usages.items()}
LOG.debug('Raise OverQuota exception because: '
'project_quotas: %(project_quotas)s, '
'user_quotas: %(user_quotas)s, deltas: %(deltas)s, '
'overs: %(overs)s, project_usages: %(project_usages)s, '
'user_usages: %(user_usages)s',
{'project_quotas': project_quotas,
'user_quotas': user_quotas,
'overs': overs, 'deltas': deltas,
'project_usages': project_usages,
'user_usages': user_usages})
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages)
return reservations
def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
_project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
_project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
session = get_session()
with session.begin():
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@require_admin_context
@_retry_on_deadlock
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
reservation_query = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
session=session, read_deleted='yes')
def _ec2_snapshot_get_query(context, session=None):
return model_query(context, models.SnapshotIdMapping,
session=session, read_deleted='yes')
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save()
return ec2_volume_ref
@require_context
def ec2_volume_get_by_uuid(context, volume_uuid):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_uuid).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_uuid)
return result
@require_context
def ec2_volume_get_by_id(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save()
return ec2_snapshot_ref
@require_context
def ec2_snapshot_get_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result
@require_context
def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_uuid).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
return result
###################
def _block_device_mapping_get_query(context, session=None,
columns_to_join=None, use_slave=False):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping,
session=session, use_slave=use_slave)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
session = get_session()
with session.begin():
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context, session=session)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(session=session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context, session=session)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
return _block_device_mapping_get_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
first()
@require_context
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
def _security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
security_group_ref.update(values)
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, session, project_id, group_names):
"""Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, session=session,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
def security_group_in_use(context, group_id):
session = get_session()
with session.begin():
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no", session=session).\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
session=session, read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
def security_group_create(context, values):
return _security_group_create(context, values)
@require_context
def security_group_update(context, security_group_id, values,
columns_to_join=None):
session = get_session()
with session.begin():
query = model_query(context, models.SecurityGroup,
session=session).filter_by(id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
try:
return _security_group_ensure_default(context)
except exception.SecurityGroupExists:
# NOTE(rpodolyaka): a concurrent transaction has succeeded first,
# suppress the error and proceed
return security_group_get_by_name(context, context.project_id,
'default')
def _security_group_ensure_default(context, session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
try:
default_group = _security_group_get_by_names(context,
session,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = _security_group_create(context, values,
session=session)
usage = model_query(context, models.QuotaUsage,
read_deleted="no", session=session).\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
# Create quota usage for auto created default security group
if not usage.first():
_quota_usage_create(context.project_id,
context.user_id,
'security_groups',
1, 0,
None,
session=session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context,
session=session).all()
for default_rule in default_rules:
# This is suboptimal, it should be programmatic to know
# the values of the default_rule
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context,
rule_values,
session=session)
return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id,
session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values, session=None):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(session=session)
return security_group_rule_ref
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
return (_security_group_rule_get_query(context).
filter_by(group_id=security_group_id).
all())
@require_context
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
#
###################
def _security_group_rule_get_default_query(context, session=None):
return model_query(context, models.SecurityGroupIngressDefaultRule,
session=session)
@require_context
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@require_admin_context
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
session = get_session()
with session.begin():
count = _security_group_rule_get_default_query(context,
session=session).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@require_admin_context
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save()
return security_group_default_rule_ref
@require_context
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).\
all()
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = _migration_get(context, id, session=session)
migration.update(values)
return migration
def _migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get(context, id):
return _migration_get(context, id)
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
use_slave=use_slave).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@require_admin_context
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted',
'error'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@require_admin_context
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
query = query.filter(models.Migration.status == filters["status"])
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
return query.all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save()
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain a 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
session = get_session()
with session.begin():
try:
instance_type_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save()
return _dict_with_extra_specs(instance_type_ref)
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
def _flavor_get_query(context, session=None, read_deleted=None):
query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == true()]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
def _flavor_get_id_from_flavor_query(context, flavor_id, session=None):
return model_query(context, models.InstanceTypes,
(models.InstanceTypes.id,),
read_deleted="no", session=session).\
filter_by(flavorid=flavor_id)
def _flavor_get_id_from_flavor(context, flavor_id, session=None):
result = _flavor_get_id_from_flavor_query(context, flavor_id,
session=session).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc("deleted"), asc("id")).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@require_admin_context
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
session = get_session()
with session.begin():
ref = model_query(context, models.InstanceTypes, session=session,
read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(session=session)
model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
read_deleted="no")
@require_admin_context
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@require_admin_context
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@require_admin_context
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id, session=None):
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return {row['key']: row['value'] for row in rows}
@require_context
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
instance_type_id = _flavor_get_id_from_flavor(context,
flavor_id, session)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
spec_ref.update({"value": specs[key]})
for key, value in specs.iteritems():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise exception.FlavorExtraSpecUpdateCreateFailed(
id=flavor_id, retries=max_retries)
####################
@require_admin_context
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save()
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name, session=None):
return model_query(context, models.Cell,
session=session).filter_by(name=cell_name)
@require_admin_context
def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell_query = _cell_get_by_name_query(context, cell_name,
session=session)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
@require_admin_context
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@require_admin_context
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@require_admin_context
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
@_retry_on_deadlock
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@_retry_on_deadlock
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@require_admin_context
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@require_admin_context
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@require_admin_context
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
return model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period, use_slave=False):
return (
model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).
filter(models.BandwidthUsage.uuid.in_(uuids)).
filter_by(start_period=start_period).
all()
)
@require_context
@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
session = get_session()
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
with session.begin():
values = {'last_refreshed': last_refreshed,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
rows = model_query(context, models.BandwidthUsage,
session=session, read_deleted="yes").\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
update(values, synchronize_session=False)
if rows:
return
bwusage = models.BandwidthUsage()
bwusage.start_period = start_period
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = last_refreshed
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(session=session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
####################
@require_context
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
session = get_session()
refreshed = timeutils.utcnow()
with session.begin():
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
session=session, read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_LI("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals."), id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(session=session)
session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(session=session)
return vol_usage
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def aggregate_create(context, values, metadata=None):
session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate_get(context, aggregate.id)
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
:param host matches host, and is required.
:param key Matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.Aggregate.id == aggregate_id)
query = query.options(contains_eager("_metadata"))
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_get_by_metadata_key(context, key):
"""Return rows that match metadata key.
:param key Matches metadata key.
"""
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
return query.all()
def aggregate_update(context, aggregate_id, values):
session = get_session()
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
def aggregate_delete(context, aggregate_id):
session = get_session()
with session.begin():
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
# Delete Metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, session=None,
read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted,
session=session).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return {r['key']: r['value'] for r in rows}
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = \
query.filter(models.AggregateMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
new_entries = []
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
new_entries.append({"key": key,
"value": value,
"aggregate_id": aggregate_id})
if new_entries:
session.execute(
models.AggregateMetadata.__table__.insert(),
new_entries)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warn(msg)
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row.iteritems())
output[row['instance_uuid']].append(data)
return output
##################
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
return action_ref
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
query = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id'])
if query.update(values) != 1:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
return query.one()
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id,
session=None):
result = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
session.add(event_ref)
return event_ref
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent,
session=session).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save()
return ec2_instance_ref
@require_context
def ec2_instance_get_by_uuid(context, instance_uuid):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@require_context
def ec2_instance_get_by_id(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(id=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = ec2_instance_get_by_id(context, ec2_id)
return result['uuid']
def _ec2_instance_get_query(context, session=None):
return model_query(context,
models.InstanceIdMapping,
session=session,
read_deleted='yes')
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
filter_by(period_ending=period_ending)
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@require_admin_context
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@require_admin_context
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@require_admin_context
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = period_beginning
task.period_ending = period_ending
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save()
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@require_admin_context
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
session = get_session()
with session.begin():
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host, session=session).\
update(values)
if rows == 0:
# It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
def _get_default_deleted_value(table):
# TODO(dripton): It would be better to introspect the actual default value
# from the column, but I don't see a way to do that in the low-level APIs
# of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should
# use when Nova is ready to require 0.8.
# NOTE(snikitin): We have one table (tags) which is not
# subclass of NovaBase. That is way this table does not contain
# column 'deleted'
if 'deleted' not in table.c:
return
# NOTE(mikal): this is a little confusing. This method returns the value
# that a _not_deleted_ row would have.
deleted_column_type = table.c.deleted.type
if isinstance(deleted_column_type, Integer):
return 0
elif isinstance(deleted_column_type, Boolean):
return False
elif isinstance(deleted_column_type, String):
return ""
else:
return None
@require_admin_context
def archive_deleted_rows_for_table(context, tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table. The context argument is only used for the decorator.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
table = Table(tablename, metadata, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
else:
column = table.c.id
# NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
query_insert = sql.select([table],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
query_delete = sql.select([column],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
insert_statement = sqlalchemyutils.InsertFromSelect(
shadow_table, query_insert)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
with conn.begin():
conn.execute(insert_statement)
result_delete = conn.execute(delete_statement)
except db_exc.DBError:
# TODO(ekudryashova): replace by DBReferenceError when db layer
# raise it.
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
msg = _("IntegrityError detected when archiving table %s") % tablename
LOG.warn(msg)
return rows_archived
rows_archived = result_delete.rowcount
return rows_archived
@require_admin_context
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def instance_group_create(context, values, policies=None,
members=None):
"""Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
session = get_session()
with session.begin():
try:
group = models.InstanceGroup()
group.update(values)
group.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
return instance_group_get(context, uuid)
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
def instance_group_get_by_instance(context, instance_uuid):
session = get_session()
with session.begin():
group_member = model_query(context, models.InstanceGroupMember,
session=session).\
filter_by(instance_id=instance_uuid).\
first()
if not group_member:
raise exception.InstanceGroupNotFound(group_uuid='')
group = _instance_group_get_query(context, models.InstanceGroup,
models.InstanceGroup.id,
group_member.group_id,
session=session).first()
if not group:
raise exception.InstanceGroupNotFound(
group_uuid=group_member.group_id)
return group
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group.
If values contains a metadata key, it updates the aggregate metadata
too. Similarly for the policies and members.
"""
session = get_session()
with session.begin():
group = model_query(context,
models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True,
session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True,
session=session)
group.update(values)
if policies:
values['policies'] = policies
if members:
values['members'] = members
def instance_group_delete(context, group_uuid):
"""Delete an group."""
session = get_session()
with session.begin():
group_id = _instance_group_id(context, group_uuid, session=session)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid,
session=session).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
filter_by(group_id=group_id).\
soft_delete()
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_count_by_project_and_user(context, project_id,
user_id, session=None):
return model_query(context, models.InstanceGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
def _instance_group_model_get_query(context, model_class, group_id,
session=None, read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted,
session=session).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid, session=None):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup,
(models.InstanceGroup.id,),
session=session).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
session = get_session()
all_members = set(members)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
session.add(member_ref)
return members
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMember,
models.InstanceGroupMember.group_id,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember,
(models.InstanceGroupMember.instance_id,)).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False,
session=None):
if not session:
session = get_session()
allpols = set(policies)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
session.add(policy_ref)
return policies
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_policies_add(context, id, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupPolicy,
models.InstanceGroupPolicy.group_id,
id).\
filter_by(policy=policy).\
soft_delete()
if count == 0:
raise exception.InstanceGroupPolicyNotFound(group_uuid=group_uuid,
policy=policy)
def instance_group_policies_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
policies = model_query(context,
models.InstanceGroupPolicy,
(models.InstanceGroupPolicy.policy,)).\
filter_by(group_id=id).all()
return [policy[0] for policy in policies]
####################
@require_admin_context
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@require_admin_context
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@require_admin_context
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
return model_query(context, models.PciDevice, session=session).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@require_admin_context
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@require_admin_context
def pci_device_update(context, node_id, address, values):
session = get_session()
with session.begin():
device = model_query(context, models.PciDevice, session=session,
read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
first()
if not device:
device = models.PciDevice()
device.update(values)
session.add(device)
return device
####################
def instance_tag_add(context, instance_uuid, tag):
session = get_session()
tag_ref = models.Tag()
tag_ref.resource_id = instance_uuid
tag_ref.tag = tag
try:
with session.begin(subtransactions=True):
_check_instance_exists(context, session, instance_uuid)
session.add(tag_ref)
except db_exc.DBDuplicateEntry:
# NOTE(snikitin): We should ignore tags duplicates
pass
return tag_ref
def instance_tag_set(context, instance_uuid, tags):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists(context, session, instance_uuid)
existing = session.query(models.Tag.tag).filter_by(
resource_id=instance_uuid).all()
existing = set(row.tag for row in existing)
tags = set(tags)
to_delete = existing - tags
to_add = tags - existing
session.query(models.Tag).filter_by(resource_id=instance_uuid).filter(
models.Tag.tag.in_(to_delete)).delete(synchronize_session=False)
data = [{'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
session.execute(models.Tag.__table__.insert(), data)
return session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
def instance_tag_get_by_instance_uuid(context, instance_uuid):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists(context, session, instance_uuid)
return session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
def instance_tag_delete(context, instance_uuid, tag):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists(context, session, instance_uuid)
result = session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag).delete()
if not result:
raise exception.InstanceTagNotFound(instance_id=instance_uuid,
tag=tag)
def instance_tag_delete_all(context, instance_uuid):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists(context, session, instance_uuid)
session.query(models.Tag).filter_by(resource_id=instance_uuid).delete()
| {
"content_hash": "e32e2211f139072b070d784657712d3a",
"timestamp": "",
"source": "github",
"line_count": 6328,
"max_line_length": 79,
"avg_line_length": 36.3013590391909,
"alnum_prop": 0.5808501839235575,
"repo_name": "projectcalico/calico-nova",
"id": "343cb351714a1e38fc06537e0aa232c62f31bfef",
"size": "230509",
"binary": false,
"copies": "2",
"ref": "refs/heads/calico-readme",
"path": "nova/db/sqlalchemy/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15232446"
},
{
"name": "Shell",
"bytes": "20717"
},
{
"name": "Smarty",
"bytes": "489680"
}
],
"symlink_target": ""
} |
from collections import namedtuple
###############
# Structs #
###############
# Request payloads
ProduceRequest = namedtuple("ProduceRequest",
["topic", "partition", "messages"])
FetchRequest = namedtuple("FetchRequest",
["topic", "partition", "offset", "max_bytes"])
OffsetRequest = namedtuple("OffsetRequest",
["topic", "partition", "time", "max_offsets"])
OffsetCommitRequest = namedtuple("OffsetCommitRequest",
["topic", "partition", "offset", "metadata"])
OffsetFetchRequest = namedtuple("OffsetFetchRequest", ["topic", "partition"])
# Response payloads
ProduceResponse = namedtuple("ProduceResponse",
["topic", "partition", "error", "offset"])
FetchResponse = namedtuple("FetchResponse", ["topic", "partition", "error",
"highwaterMark", "messages"])
OffsetResponse = namedtuple("OffsetResponse",
["topic", "partition", "error", "offsets"])
OffsetCommitResponse = namedtuple("OffsetCommitResponse",
["topic", "partition", "error"])
OffsetFetchResponse = namedtuple("OffsetFetchResponse",
["topic", "partition", "offset",
"metadata", "error"])
BrokerMetadata = namedtuple("BrokerMetadata", ["nodeId", "host", "port"])
PartitionMetadata = namedtuple("PartitionMetadata",
["topic", "partition", "leader",
"replicas", "isr"])
# Other useful structs
OffsetAndMessage = namedtuple("OffsetAndMessage", ["offset", "message"])
Message = namedtuple("Message", ["magic", "attributes", "key", "value"])
TopicAndPartition = namedtuple("TopicAndPartition", ["topic", "partition"])
| {
"content_hash": "0458bb8ff5f9a723b4d745996fe321ce",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 38.979166666666664,
"alnum_prop": 0.5617316942811331,
"repo_name": "duanhongyi/kakfa",
"id": "00df05a69f811130b1285157cc989ad0ffb45e52",
"size": "1871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "56284"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import copy
import itertools
import logging
import threading
import random
import string
from functools import wraps, total_ordering
from sqlalchemy import Column, Integer, String, Unicode
from flexget import config_schema, db_schema
from flexget.entry import EntryUnicodeError
from flexget.event import event, fire_event
from flexget.logger import capture_output
from flexget.manager import Session
from flexget.plugin import plugins as all_plugins
from flexget.plugin import (
DependencyError, get_plugins, phase_methods, plugin_schemas, PluginError, PluginWarning, task_phases)
from flexget.utils import requests
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimpleTaskPersistence
from flexget.utils.tools import get_config_hash, MergeException, merge_dict_from_to
from flexget.utils.template import render_from_task, FlexGetTemplate
log = logging.getLogger('task')
Base = db_schema.versioned_base('feed', 0)
class TaskConfigHash(Base):
"""Stores the config hash for tasks so that we can tell if the config has changed since last run."""
__tablename__ = 'feed_config_hash'
id = Column(Integer, primary_key=True)
task = Column('name', Unicode, index=True, nullable=False)
hash = Column('hash', String)
def __repr__(self):
return '<TaskConfigHash(task=%s,hash=%s)>' % (self.task, self.hash)
@with_session
def config_changed(task=None, session=None):
"""
Forces config_modified flag to come out true on next run of `task`. Used when the db changes, and all
entries need to be reprocessed.
.. WARNING: DO NOT (FURTHER) USE FROM PLUGINS
:param task: Name of the task. If `None`, will be set for all tasks.
"""
log.debug('Marking config for %s as changed.' % (task or 'all tasks'))
task_hash = session.query(TaskConfigHash)
if task:
task_hash = task_hash.filter(TaskConfigHash.task == task)
task_hash.delete()
def use_task_logging(func):
@wraps(func)
def wrapper(self, *args, **kw):
# Set the task name in the logger and capture output
from flexget import logger
with logger.task_logging(self.name):
if self.output:
with capture_output(self.output, loglevel=self.loglevel):
return func(self, *args, **kw)
else:
return func(self, *args, **kw)
return wrapper
class EntryIterator(object):
"""An iterator over a subset of entries to emulate old task.accepted/rejected/failed/entries properties."""
def __init__(self, entries, states):
self.all_entries = entries
if isinstance(states, str):
states = [states]
self.filter = lambda e: e._state in states
def __iter__(self):
return filter(self.filter, self.all_entries)
def __bool__(self):
return any(e for e in self)
def __len__(self):
return sum(1 for e in self)
def __add__(self, other):
return itertools.chain(self, other)
def __radd__(self, other):
return itertools.chain(other, self)
def __getitem__(self, item):
if isinstance(item, slice):
return list(itertools.islice(self, item.start, item.stop))
if not isinstance(item, int):
raise ValueError('Index must be integer.')
for index, entry in enumerate(self):
if index == item:
return entry
else:
raise IndexError('%d is out of bounds' % item)
def reverse(self):
self.all_entries.sort(reverse=True)
def sort(self, *args, **kwargs):
self.all_entries.sort(*args, **kwargs)
class EntryContainer(list):
"""Container for a list of entries, also contains accepted, rejected failed iterators over them."""
def __init__(self, iterable=None):
list.__init__(self, iterable or [])
self._entries = EntryIterator(self, ['undecided', 'accepted'])
self._accepted = EntryIterator(self, 'accepted') # accepted entries, can still be rejected
self._rejected = EntryIterator(self, 'rejected') # rejected entries, can not be accepted
self._failed = EntryIterator(self, 'failed') # failed entries
self._undecided = EntryIterator(self, 'undecided') # undecided entries (default)
# Make these read-only properties
entries = property(lambda self: self._entries)
accepted = property(lambda self: self._accepted)
rejected = property(lambda self: self._rejected)
failed = property(lambda self: self._failed)
undecided = property(lambda self: self._undecided)
def __repr__(self):
return '<EntryContainer(%s)>' % list.__repr__(self)
class TaskAbort(Exception):
def __init__(self, reason, silent=False):
self.reason = reason
self.silent = silent
def __repr__(self):
return 'TaskAbort(reason=%s, silent=%s)' % (self.reason, self.silent)
@total_ordering
class Task(object):
"""
Represents one task in the configuration.
**Fires events:**
* task.execute.before_plugin
Before a plugin is about to be executed. Note that since this will also include all
builtin plugins the amount of calls can be quite high
``parameters: task, keyword``
* task.execute.after_plugin
After a plugin has been executed.
``parameters: task, keyword``
* task.execute.started
Before a task starts execution
* task.execute.completed
After task execution has been completed
``parameters: task``
"""
# Used to determine task order, when priority is the same
_counter = itertools.count()
RERUN_DEFAULT = 5
RERUN_MAX = 100
def __init__(self, manager, name, config=None, options=None, output=None, loglevel=None, priority=None,
suppress_warnings=None):
"""
:param Manager manager: Manager instance.
:param string name: Name of the task.
:param dict config: Task configuration.
:param options: dict or argparse namespace with options for this task
:param output: A filelike that all logs and stdout will be sent to for this task.
:param loglevel: Custom loglevel, only log messages at this level will be sent to `output`
:param priority: If multiple tasks are waiting to run, the task with the lowest priority will be run first.
The default is 0, if the cron option is set though, the default is lowered to 10.
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
"""
self.name = str(name)
self.id = ''.join(random.choice(string.digits) for _ in range(6))
self.manager = manager
if config is None:
config = manager.config['tasks'].get(name, {})
self.config = copy.deepcopy(config)
self.prepared_config = None
if options is None:
options = copy.copy(self.manager.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.manager.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
# If execution hasn't specifically set the `allow_manual` flag, set it to False by default
if not hasattr(options, 'allow_manual'):
setattr(options, 'allow_manual', False)
self.options = options
self.output = output
self.loglevel = loglevel
self.suppress_warnings = suppress_warnings or []
if priority is None:
self.priority = 10 if self.options.cron else 0
else:
self.priority = priority
self.priority = priority
self._count = next(self._counter)
self.finished_event = threading.Event()
# simple persistence
self.simple_persistence = SimpleTaskPersistence(self)
# rerun related flags and values
self._rerun_count = 0
self._max_reruns = Task.RERUN_DEFAULT
self._reruns_locked = False
self.config_modified = None
self.enabled = not self.name.startswith('_')
# These are just to query what happened in task. Call task.abort to set.
self.aborted = False
self.abort_reason = None
self.silent_abort = False
self.session = None
self.requests = requests.Session()
# List of all entries in the task
self._all_entries = EntryContainer()
self._rerun = False
self.disabled_phases = []
# current state
self.current_phase = None
self.current_plugin = None
@property
def max_reruns(self):
"""How many times task can be rerunned before stopping"""
return self._max_reruns
@max_reruns.setter
def max_reruns(self, value):
"""Set new maximum value for reruns unless property has been locked"""
if not self._reruns_locked:
self._max_reruns = value
else:
log.debug('max_reruns is locked, %s tried to modify it', self.current_plugin)
def lock_reruns(self):
"""Prevent modification of max_reruns property"""
log.debug('Enabling rerun lock')
self._reruns_locked = True
def unlock_reruns(self):
"""Allow modification of max_reruns property"""
log.debug('Releasing rerun lock')
self._reruns_locked = False
@property
def reruns_locked(self):
return self._reruns_locked
@property
def is_rerun(self):
return bool(self._rerun_count)
@property
def rerun_count(self):
return self._rerun_count
@property
def undecided(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.undecided
@property
def failed(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.failed
@property
def rejected(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.rejected
@property
def accepted(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.accepted
@property
def entries(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.entries
@property
def all_entries(self):
"""
.. deprecated:: Use API v3
"""
return self._all_entries
def __lt__(self, other):
return (self.priority, self._count) < (other.priority, other._count)
def __eq__(self, other):
return (self.priority, self._count) == (other.priority, other._count)
def __str__(self):
return '<Task(name=%s,aborted=%s)>' % (self.name, self.aborted)
def disable_phase(self, phase):
"""Disable ``phase`` from execution.
:param string phase: Name of ``phase``
:raises ValueError: *phase* could not be found.
"""
if phase not in task_phases:
raise ValueError('%s is not a valid phase' % phase)
if phase not in self.disabled_phases:
log.debug('Disabling %s phase' % phase)
self.disabled_phases.append(phase)
def abort(self, reason='Unknown', silent=False, traceback=None):
"""Abort this task execution, no more plugins will be executed except the abort handling ones."""
self.aborted = True
self.abort_reason = reason
self.silent_abort = silent
self.traceback = traceback
if not self.silent_abort:
log.warning('Aborting task (plugin: %s)' % self.current_plugin)
else:
log.debug('Aborting task (plugin: %s)' % self.current_plugin)
raise TaskAbort(reason, silent=silent)
def find_entry(self, category='entries', **values):
"""
Find and return :class:`~flexget.entry.Entry` with given attributes from task or None
:param string category: entries, accepted, rejected or failed. Defaults to entries.
:param values: Key values of entries to be searched
:return: Entry or None
"""
cat = getattr(self, category)
if not isinstance(cat, EntryIterator):
raise TypeError('category must be a EntryIterator')
for entry in cat:
for k, v in values.items():
if not (k in entry and entry[k] == v):
break
else:
return entry
return None
def plugins(self, phase=None):
"""Get currently enabled plugins.
:param string phase:
Optional, limits to plugins currently configured on given phase, sorted in phase order.
:return:
An iterator over configured :class:`flexget.plugin.PluginInfo` instances enabled on this task.
"""
if phase:
plugins = sorted(get_plugins(phase=phase), key=lambda p: p.phase_handlers[phase], reverse=True)
else:
plugins = iter(all_plugins.values())
return (p for p in plugins if p.name in self.config or p.builtin)
def __run_task_phase(self, phase):
"""Executes task phase, ie. call all enabled plugins on the task.
Fires events:
* task.execute.before_plugin
* task.execute.after_plugin
:param string phase: Name of the phase
"""
if phase not in phase_methods:
raise Exception('%s is not a valid task phase' % phase)
# warn if no inputs, filters or outputs in the task
if phase in ['input', 'filter', 'output']:
if not self.manager.unit_test:
# Check that there is at least one manually configured plugin for these phases
for p in self.plugins(phase):
if not p.builtin:
break
else:
if phase not in self.suppress_warnings:
if phase == 'filter':
log.warning('Task does not have any filter plugins to accept entries. '
'You need at least one to accept the entries you want.')
else:
log.warning('Task doesn\'t have any %s plugins, you should add (at least) one!' % phase)
for plugin in self.plugins(phase):
# Abort this phase if one of the plugins disables it
if phase in self.disabled_phases:
return
# store execute info, except during entry events
self.current_phase = phase
self.current_plugin = plugin.name
if plugin.api_ver == 1:
# backwards compatibility
# pass method only task (old behaviour)
args = (self,)
else:
# pass method task, copy of config (so plugin cannot modify it)
args = (self, copy.copy(self.config.get(plugin.name)))
# Hack to make task.session only active for a single plugin
with Session() as session:
self.session = session
try:
fire_event('task.execute.before_plugin', self, plugin.name)
response = self.__run_plugin(plugin, phase, args)
if phase == 'input' and response:
# add entries returned by input to self.all_entries
for e in response:
e.task = self
self.all_entries.extend(response)
finally:
fire_event('task.execute.after_plugin', self, plugin.name)
self.session = None
# check config hash for changes at the end of 'prepare' phase
if phase == 'prepare':
self.check_config_hash()
def __run_plugin(self, plugin, phase, args=None, kwargs=None):
"""
Execute given plugins phase method, with supplied args and kwargs.
If plugin throws unexpected exceptions :meth:`abort` will be called.
:param PluginInfo plugin: Plugin to be executed
:param string phase: Name of the phase to be executed
:param args: Passed to the plugin
:param kwargs: Passed to the plugin
"""
keyword = plugin.name
method = plugin.phase_handlers[phase]
if args is None:
args = []
if kwargs is None:
kwargs = {}
# log.trace('Running %s method %s' % (keyword, method))
# call the plugin
try:
return method(*args, **kwargs)
except TaskAbort:
raise
except PluginWarning as warn:
# check if this warning should be logged only once (may keep repeating)
if warn.kwargs.get('log_once', False):
from flexget.utils.log import log_once
log_once(warn.value, warn.log)
else:
warn.log.warning(warn)
except EntryUnicodeError as eue:
msg = ('Plugin %s tried to create non-unicode compatible entry (key: %s, value: %r)' %
(keyword, eue.key, eue.value))
log.critical(msg)
self.abort(msg)
except PluginError as err:
err.log.critical(err.value)
self.abort(err.value)
except DependencyError as e:
msg = ('Plugin `%s` cannot be used because dependency `%s` is missing.' %
(keyword, e.missing))
log.critical(msg)
log.debug(e.message)
self.abort(msg)
except Warning as e:
# If warnings have been elevated to errors
msg = 'Warning during plugin %s: %s' % (keyword, e)
log.exception(msg)
self.abort(msg)
except Exception as e:
msg = 'BUG: Unhandled error in plugin %s: %s' % (keyword, e)
log.critical(msg)
traceback = self.manager.crash_report()
self.abort(msg, traceback=traceback)
def rerun(self, plugin=None, reason=None):
"""
Immediately re-run the task after execute has completed,
task can be re-run up to :attr:`.max_reruns` times.
:param str plugin: Plugin name
:param str reason: Why the rerun is done
"""
msg = 'Plugin {0} has requested task to be ran again after execution has completed.'.format(
self.current_plugin if plugin is None else plugin)
if reason:
msg += ' Reason: {0}'.format(reason)
# Only print the first request for a rerun to the info log
if self._rerun:
log.debug(msg)
else:
log.info(msg)
self._rerun = True
def config_changed(self):
"""
Sets config_modified flag to True for the remainder of this run.
Used when the db changes, and all entries need to be reprocessed.
"""
self.config_modified = True
def merge_config(self, new_config):
try:
merge_dict_from_to(new_config, self.config)
except MergeException as e:
raise PluginError('Failed to merge configs for task %s: %s' % (self.name, e))
def check_config_hash(self):
"""
Checks the task's config hash and updates the hash if necessary.
"""
# Save current config hash and set config_modified flag
config_hash = get_config_hash(self.config)
if self.is_rerun:
# Restore the config to state right after start phase
if self.prepared_config:
self.config = copy.deepcopy(self.prepared_config)
else:
log.error('BUG: No prepared_config on rerun, please report.')
with Session() as session:
last_hash = session.query(TaskConfigHash).filter(TaskConfigHash.task == self.name).first()
if not last_hash:
session.add(TaskConfigHash(task=self.name, hash=config_hash))
self.config_changed()
elif last_hash.hash != config_hash:
last_hash.hash = config_hash
self.config_changed()
def _execute(self):
"""Executes the task without rerunning."""
if not self.enabled:
log.debug('Not running disabled task %s' % self.name)
return
log.debug('executing %s' % self.name)
# Handle keyword args
if self.options.learn:
log.info('Disabling download and output phases because of --learn')
self.disable_phase('download')
self.disable_phase('output')
if self.options.disable_phases:
list(map(self.disable_phase, self.options.disable_phases))
if self.options.inject:
# If entries are passed for this execution (eg. rerun), disable the input phase
self.disable_phase('input')
self.all_entries.extend(copy.deepcopy(self.options.inject))
# run phases
try:
for phase in task_phases:
if phase in self.disabled_phases:
# log keywords not executed
for plugin in self.plugins(phase):
if plugin.name in self.config:
log.info('Plugin %s is not executed because %s phase is disabled (e.g. --test)' %
(plugin.name, phase))
continue
if phase in ('start', 'prepare') and self.is_rerun:
log.debug('skipping phase %s during rerun', phase)
elif phase == 'exit' and self._rerun and self._rerun_count < self.max_reruns:
log.debug('not running task_exit yet because task will rerun')
else:
# run all plugins with this phase
self.__run_task_phase(phase)
if phase == 'start':
# Store a copy of the config state after start phase to restore for reruns
self.prepared_config = copy.deepcopy(self.config)
except TaskAbort:
try:
self.__run_task_phase('abort')
except TaskAbort as e:
log.exception('abort handlers aborted: %s' % e)
raise
else:
for entry in self.all_entries:
entry.complete()
@use_task_logging
def execute(self):
"""
Executes the the task.
If :attr:`.enabled` is False task is not executed. Certain :attr:`.options`
affect how execution is handled.
- :attr:`.options.disable_phases` is a list of phases that are not enabled
for this execution.
- :attr:`.options.inject` is a list of :class:`Entry` instances used instead
of running input phase.
"""
try:
self.finished_event.clear()
if self.options.cron:
self.manager.db_cleanup()
fire_event('task.execute.started', self)
while True:
self._execute()
# rerun task
if self._rerun and self._rerun_count < self.max_reruns and self._rerun_count < Task.RERUN_MAX:
log.info('Rerunning the task in case better resolution can be achieved.')
self._rerun_count += 1
# TODO: Potential optimization is to take snapshots (maybe make the ones backlog uses built in
# instead of taking another one) after input and just inject the same entries for the rerun
self._all_entries = EntryContainer()
self._rerun = False
continue
elif self._rerun:
log.info('Task has been re-run %s times already, stopping for now' % self._rerun_count)
break
fire_event('task.execute.completed', self)
finally:
self.finished_event.set()
@staticmethod
def validate_config(config):
schema = plugin_schemas(interface='task')
# Don't validate commented out plugins
schema['patternProperties'] = {'^_': {}}
return config_schema.process_config(config, schema)
def __copy__(self):
new = type(self)(self.manager, self.name, self.config, self.options)
# Update all the variables of new instance to match our own
new.__dict__.update(self.__dict__)
# Some mutable objects need to be copies
new.options = copy.copy(self.options)
new.config = copy.deepcopy(self.config)
return new
copy = __copy__
def render(self, template):
"""
Renders a template string based on fields in the entry.
:param template: A template string or FlexGetTemplate that uses jinja2 or python string replacement format.
:return: The result of the rendering.
:rtype: string
:raises RenderError: If there is a problem.
"""
if not isinstance(template, (str, FlexGetTemplate)):
raise ValueError(
'Trying to render non string template or unrecognized template format, got %s' % repr(template))
log.trace('rendering: %s', template)
return render_from_task(template, self)
@event('config.register')
def register_config_key():
task_config_schema = {
'type': 'object',
'additionalProperties': plugin_schemas(interface='task')
}
config_schema.register_config_key('tasks', task_config_schema, required=True)
| {
"content_hash": "724e09b7fe4b22b0be3963986de82f6b",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 116,
"avg_line_length": 36.67948717948718,
"alnum_prop": 0.592527865159812,
"repo_name": "OmgOhnoes/Flexget",
"id": "f240bf57daa8e97cdb8e0e976388d1f80acd46e5",
"size": "25749",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3324701"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
import ansible
from ansible import utils
from ansible.utils import template
from ansible.runner.return_data import ReturnData
class ActionModule(object):
''' Print statements during execution '''
TRANSFERS_FILES = False
def __init__(self, runner):
self.runner = runner
self.basedir = runner.basedir
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
args = {}
if complex_args:
args.update(complex_args)
# attempt to prevent confusing messages when the variable didn't interpolate
module_args = module_args.replace("{{ ","{{").replace(" }}","}}")
kv = utils.parse_kv(module_args)
args.update(kv)
if not 'msg' in args and not 'var' in args:
args['msg'] = 'Hello world!'
result = {}
if 'msg' in args:
if 'fail' in args and utils.boolean(args['fail']):
result = dict(failed=True, msg=args['msg'])
else:
result = dict(msg=args['msg'])
elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']):
results = template.template(self.basedir, args['var'], inject, convert_bare=True)
result['var'] = { args['var']: results }
# force flag to make debug output module always verbose
result['verbose_always'] = True
return ReturnData(conn=conn, result=result)
| {
"content_hash": "6621bfde0abe2fea7a24d2ac8538864c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 93,
"avg_line_length": 33.51162790697674,
"alnum_prop": 0.5981956974323387,
"repo_name": "ygol/dotfiles",
"id": "eaf1364c3f39ac1bf5e6eaa0746419f35ab94518",
"size": "2142",
"binary": false,
"copies": "119",
"ref": "refs/heads/master",
"path": "bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/runner/action_plugins/debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "513"
},
{
"name": "JavaScript",
"bytes": "10707"
},
{
"name": "Lua",
"bytes": "35950"
},
{
"name": "Perl",
"bytes": "8914"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "7417846"
},
{
"name": "Ruby",
"bytes": "24958"
},
{
"name": "Shell",
"bytes": "316253"
},
{
"name": "Vim script",
"bytes": "1437097"
}
],
"symlink_target": ""
} |
from os import path, system
import re
import requests
from bs4 import BeautifulSoup
from getpass import getpass
un = 'veritas'
pw = getpass(prompt="enter pword")
page_url = "https://veritas.sao.arizona.edu/wiki/index.php/Advanced_Analysis_Test_Samples"
response = requests.get(page_url, auth=(un,pw))
pass # do nothing
print(response.text)
rxpage = re.compile(r'wiki')
match = rxpage.search(page_url)
wiki_base = page_url[:match.start()]
print(wiki_base)
soup = BeautifulSoup(response.text, 'lxml')
urls = []
names = []
for i, link in enumerate(soup.findAll('a')):
file_addr = link.get('href')
if file_addr:
_FULLURL = wiki_base + file_addr
if _FULLURL.endswith('.txt'):
urls.append(_FULLURL)
slct = soup.select('a')[i].attrs['href']
names.append(path.split(slct)[1])
names_urls = zip(names, urls)
save_dir = path.expanduser('~')+"VERITAS/VEGAS-v2_5_5/validation/standard_runlists"
# directory not working
for name, url in names_urls:
print('Downloading %s' % url)
cmd = 'wget --user {0} --password {1} --directory-prefix={2} {3}'.format(un, pw, save_dir, url)
print(system('wget --user {0} --password {1} {2}'.format(un, pw, url)))
| {
"content_hash": "7e024ead0ec4b99649672cf38ccb42f7",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 99,
"avg_line_length": 30.45,
"alnum_prop": 0.6584564860426929,
"repo_name": "mbuchove/notebook-wurk-b",
"id": "59f0ed8d7721c0cd5620f87cba397fe03848eff2",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/download_from_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "10813"
},
{
"name": "Java",
"bytes": "735"
},
{
"name": "Jupyter Notebook",
"bytes": "2286044"
},
{
"name": "Makefile",
"bytes": "266"
},
{
"name": "Python",
"bytes": "48911"
},
{
"name": "R",
"bytes": "46106"
},
{
"name": "Smalltalk",
"bytes": "1847"
},
{
"name": "TeX",
"bytes": "6445"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.command.sdist import sdist as _sdist
import re
import sys
import time
import codecs
import subprocess
if sys.version < "2.2.3":
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
# Workaround for problems caused by this import
# It's either this or hardcoding the version.
# from pyrax.version import version
with open("pyrax/version.py", "rt") as vfile:
version_text = vfile.read()
vmatch = re.search(r'version ?= ?"(.+)"$', version_text)
version = vmatch.groups()[0]
# When set to '0' this expands in the RPM SPEC file to a unique date-base string
# Set to another value when cutting official release RPMS, then change back to
# zero for the next development cycle
release = '0'
class sdist(_sdist):
""" custom sdist command, to prep pyrax.spec file """
def run(self):
global version
global release
# Create a development release string for later use
git_head = subprocess.Popen("git log -1 --pretty=format:%h",
shell=True,
stdout=subprocess.PIPE).communicate()[0].strip()
date = time.strftime("%Y%m%d%H%M%S", time.gmtime())
git_release = "%sgit%s" % (date, git_head)
# Expand macros in pyrax.spec.in
spec_in = open('pyrax.spec.in', 'r')
spec = open('pyrax.spec', 'w')
for line in spec_in.xreadlines():
if "@VERSION@" in line:
line = line.replace("@VERSION@", version)
elif "@RELEASE@" in line:
# If development release, include date+githash in %{release}
if release.startswith('0'):
release += '.' + git_release
line = line.replace("@RELEASE@", release)
spec.write(line)
spec_in.close()
spec.close()
# Run parent constructor
_sdist.run(self)
# Get the long description from the relevant file
try:
f = codecs.open('README.rst', encoding='utf-8')
long_description = f.read()
f.close()
except:
long_description = ''
testing_requires = ["mock"]
setup(
name="pyrax",
version=version,
description="Python language bindings for OpenStack Clouds.",
long_description=long_description,
author="Rackspace",
author_email="[email protected]",
url="https://github.com/rackspace/pyrax",
license='Apache License, Version 2.0',
keywords="pyrax rackspace cloud openstack",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Operating System :: OS Independent",
],
install_requires=[
"python-novaclient>=2.13.0",
"rackspace-novaclient",
"keyring",
"requests>=2.2.1",
"six>=1.5.2",
] + testing_requires,
packages=[
"pyrax",
"pyrax/identity",
],
cmdclass={'sdist': sdist}
)
| {
"content_hash": "36cc4dc70fc4fac1eb12cd87911bbdf3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 84,
"avg_line_length": 31.938144329896907,
"alnum_prop": 0.6113621691413815,
"repo_name": "0dataloss/pyrax",
"id": "3cc3c4ffb1590dca5a5e5172263acb541696c1d1",
"size": "3121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "Python",
"bytes": "1273276"
}
],
"symlink_target": ""
} |
from behave import *
import urllib2
import urllib
import json
import jsonpath_rw
## The basic and critical remote collector.
## It defines:
## context.code
## context.content_type
## context.content
## context.content_length
@given('I collect data at URL "{url}"')
def step_impl(context, url):
## Build request.
values = {}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
## Make the attempt, fail.
response = None
try: response = urllib2.urlopen(req)
except urllib2.URLError as e:
if hasattr(e, 'code'):
print('Server error, code: ', e.code)
if hasattr(e, 'reason'):
print('Failed to reach server: ', e.reason)
assert True is False
else:
## Final
pass
## Parcel out what we have for downstream checking.
context.code = response.code
## https://docs.python.org/2/library/mimetools.html#mimetools.Message
context.content_type = response.info().gettype()
context.content = response.read()
context.content_length = 0
if context.content :
context.content_length = len(context.content)
@then('the content type should be "{ctype}"')
def step_impl(context, ctype):
if not context.content_type :
## Apparently no content type at all...
assert True is False
else:
assert context.content_type == ctype
@then('the content should contain "{text}"')
def step_impl(context, text):
if not context.content :
## Apparently no text at all...
assert True is False
else:
assert context.content.rfind(text) != -1
## Adds:
## context.content_json
@when('the content is converted to JSON')
def step_impl(context):
if not context.content :
## Apparently no text at all...
assert True is False
else:
context.content_json = json.loads(context.content)
@then('the JSON should have the top-level property "{prop}"')
def step_impl(context, prop):
if not context.content_json :
## Apparently no JSON at all...
assert True is False
else:
assert context.content_json.get(prop)
@then('the JSON should have the JSONPath "{jsonpath}"')
def step_impl(context, jsonpath):
if not context.content_json :
## Apparently no JSON at all...
assert True is False
else:
jsonpath_expr = jsonpath_rw.parse(jsonpath)
res = jsonpath_expr.find(context.content_json)
#assert len(res) > 0
#print(res)
assert res
@then('the JSON should have JSONPath "{jsonpath}" equal to string "{value}"')
def step_impl(context, jsonpath, value):
if not context.content_json :
## Apparently no JSON at all...
assert True is False
else:
jsonpath_expr = jsonpath_rw.parse(jsonpath)
res = jsonpath_expr.find(context.content_json)
if not res[0] :
assert True is False
else:
assert res[0].value == value
@then('the JSON should have JSONPath "{jsonpath}" equal to float "{value}"')
def step_impl(context, jsonpath, value):
if not context.content_json :
## Apparently no JSON at all...
assert True is False
else:
jsonpath_expr = jsonpath_rw.parse(jsonpath)
res = jsonpath_expr.find(context.content_json)
if not res[0] :
assert True is False
else:
assert res[0].value == float(value)
| {
"content_hash": "156eda585f665aa395471a7467aa59a2",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 77,
"avg_line_length": 30.460176991150444,
"alnum_prop": 0.6263800116211505,
"repo_name": "ValWood/go-site",
"id": "68f4c9c542e72a7225c1de08bc36bf8b03b5a0a1",
"size": "3512",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/behave/steps/basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "782"
},
{
"name": "Gherkin",
"bytes": "2852"
},
{
"name": "JavaScript",
"bytes": "9509"
},
{
"name": "Makefile",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "3633"
},
{
"name": "Perl",
"bytes": "33065"
},
{
"name": "Python",
"bytes": "4991"
},
{
"name": "Shell",
"bytes": "1733"
}
],
"symlink_target": ""
} |
import line_style
import pychart_util
import chart_object
import fill_style
import legend
import range_plot_doc
from pychart_types import *
from types import *
from scaling import *
class T(chart_object.T):
__doc__ = range_plot_doc.doc
keys = {
"data" : (AnyType, None, pychart_util.data_desc),
"label": (StringType, "???", pychart_util.label_desc),
"xcol" : (IntType, 0, pychart_util.xcol_desc),
"min_col": (IntType, 1,
"The lower bound of the sweep is extracted from "
+ "this column of data."),
"max_col": (IntType, 2,
"The upper bound of the sweep is extracted from "
+ "this column of data."),
"line_style": (line_style.T, line_style.default,
"The style of the boundary line."),
"fill_style": (fill_style.T, fill_style.default,
""),
}
##AUTOMATICALLY GENERATED
##END AUTOMATICALLY GENERATED
def check_integrity(self):
chart_object.T.check_integrity(self)
def get_data_range(self, which):
if which == 'X':
return pychart_util.get_data_range(self.data, self.xcol)
else:
ymax = (pychart_util.get_data_range(self.data, self.max_col))[1]
ymin = (pychart_util.get_data_range(self.data, self.min_col))[0]
return (ymin, ymax)
def get_legend_entry(self):
if self.label:
return legend.Entry(line_style=self.line_style,
fill_style=self.fill_style,
label=self.label)
return None
def draw(self, ar, can):
prevPair = None
xmin=999999
xmax=-999999
ymin=999999
ymax=-999999
# Draw the boundary in a single stroke.
can.gsave()
can.newpath()
for pair in self.data:
x = pair[self.xcol]
y = pychart_util.get_sample_val(pair, self.max_col)
if y == None:
continue
xmin = min(xmin, ar.x_pos(x))
xmax = max(xmax, ar.x_pos(x))
ymin = min(ymin, ar.y_pos(y))
ymax = max(ymax, ar.y_pos(y))
if prevPair != None:
can.lineto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
else:
can.moveto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
prevPair = pair
for i in range(len(self.data)-1, -1, -1):
pair = self.data[i]
x = pair[self.xcol]
y = pychart_util.get_sample_val(pair, self.min_col)
if None in (x, y):
continue
xmin = min(xmin, ar.x_pos(x))
xmax = max(xmax, ar.x_pos(x))
ymin = min(ymin, ar.y_pos(y))
ymax = max(ymax, ar.y_pos(y))
can.lineto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
can.closepath()
# create a clip region, and fill it.
can.clip_sub()
can.fill_with_pattern(self.fill_style, xmin, ymin, xmax, ymax)
can.grestore()
if self.line_style:
# draw the boundary.
prevPair = None
can.newpath()
can.set_line_style(self.line_style)
for pair in self.data:
x = pair[self.xcol]
y = pychart_util.get_sample_val(pair, self.min_col)
if None in (x, y):
continue
if prevPair != None:
can.lineto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
else:
can.moveto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
prevPair = pair
can.stroke()
prevPair = None
can.newpath()
can.set_line_style(self.line_style)
for pair in self.data:
x = pair[self.xcol]
y = pychart_util.get_sample_val(pair, self.max_col)
if y == None:
continue
if prevPair != None:
can.lineto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
else:
can.moveto(xscale(ar.x_pos(x)), yscale(ar.y_pos(y)))
prevPair = pair
can.stroke()
| {
"content_hash": "b8934426a586bb3cfbf37140463e6ab3",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 76,
"avg_line_length": 32.94656488549618,
"alnum_prop": 0.4932808155699722,
"repo_name": "nzavagli/UnrealPy",
"id": "eed79462ef5cada54ca929fa53e3d010c5b05e85",
"size": "4857",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/PyChart-1.39/pychart/range_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
"""Class for starwelsd node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import time
from .authproxy import JSONRPCException
from .util import (
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
STARWELSD_PROC_WAIT_TIMEOUT = 60
class TestNode():
"""A class for representing a starwelsd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("STARWELSD", "starwelsd")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
# Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("STARWELSCLI", "starwels-cli"), self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def __del__(self):
# Ensure that we don't leave any starwelsd processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print("Cleaning up leftover process")
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by starwelsd, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("starwelsd started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the starwelsd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "starwelsd exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcassword. starwelsd still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to starwelsd")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=STARWELSD_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes starwelsd to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to starwels-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.starwelscli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with starwels-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run starwels-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same starwels-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running starwels-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| {
"content_hash": "0e6ab36ce49f3c6d0cb982233eaabce9",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 248,
"avg_line_length": 38.66308243727599,
"alnum_prop": 0.6134235654027996,
"repo_name": "starwels/starwels",
"id": "dd4c955bdcf57801867607c0298d6e5fbd036e68",
"size": "10992",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/test_node.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "685646"
},
{
"name": "C++",
"bytes": "5453841"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30298"
},
{
"name": "M4",
"bytes": "195016"
},
{
"name": "Makefile",
"bytes": "114512"
},
{
"name": "Objective-C",
"bytes": "3579"
},
{
"name": "Objective-C++",
"bytes": "6752"
},
{
"name": "Python",
"bytes": "1288886"
},
{
"name": "QMake",
"bytes": "757"
},
{
"name": "Shell",
"bytes": "66964"
}
],
"symlink_target": ""
} |
import argparse
import inspect
import logging
import json
import base64
from docstring_parser import parse
from collections import namedtuple
from flask import Flask, request
from flask_restx import Api, Resource, fields, abort
from flask_cors import CORS
from indra import get_config
from indra.sources import trips, reach, bel, biopax, eidos, hume, cwms, sofia
from indra.databases import hgnc_client
from indra.statements import stmts_from_json, get_statement_by_name
from indra.assemblers.pysb import PysbAssembler
import indra.assemblers.pysb.assembler as pysb_assembler
from indra.assemblers.cx import CxAssembler
from indra.assemblers.graph import GraphAssembler
from indra.assemblers.cyjs import CyJSAssembler
from indra.assemblers.sif import SifAssembler
from indra.assemblers.english import EnglishAssembler
from indra.tools.assemble_corpus import *
from indra.databases import cbio_client
from indra.sources.indra_db_rest import get_statements
from indra.sources.ndex_cx.api import process_ndex_network
from indra.sources.reach.api import reach_nxml_url, reach_text_url
from indra.belief.wm_scorer import get_eidos_scorer
from indra.ontology.bio import bio_ontology
from indra.ontology.world import world_ontology
from indra.pipeline import AssemblyPipeline, pipeline_functions
from indra.preassembler.custom_preassembly import *
logger = logging.getLogger('rest_api')
logger.setLevel(logging.DEBUG)
# Create Flask app, api, namespaces, and models
app = Flask(__name__)
api = Api(
app, title='INDRA REST API', description='REST API for INDRA webservice')
CORS(app)
preassembly_ns = api.namespace(
'Preassembly', 'Preassemble INDRA Statements', path='/preassembly/')
sources_ns = api.namespace(
'Sources', 'Get INDRA Statements from various sources', path='/')
assemblers_ns = api.namespace(
'Assemblers', 'Assemble INDRA Statements into models', path='/assemblers/')
ndex_ns = api.namespace('NDEx', 'Use NDEx service', path='/')
indra_db_rest_ns = api.namespace(
'INDRA DB REST', 'Use INDRA DB REST API', path='/indra_db_rest/')
databases_ns = api.namespace(
'Databases', 'Access external databases', path='/databases/')
# Models that can be inherited and reused in different namespaces
dict_model = api.model('dict', {})
stmts_model = api.model('Statements', {
'statements': fields.List(fields.Nested(dict_model), example=[{
"id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f",
"type": "Complex",
"members": [
{"db_refs": {"TEXT": "MEK", "FPLX": "MEK"}, "name": "MEK"},
{"db_refs": {"TEXT": "ERK", "FPLX": "ERK"}, "name": "ERK"}
],
"sbo": "https://identifiers.org/SBO:0000526",
"evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]
}])})
bio_text_model = api.model('BioText', {
'text': fields.String(example='GRB2 binds SHC.')})
wm_text_model = api.model('WMText', {
'text': fields.String(example='Rainfall causes floods.')})
jsonld_model = api.model('jsonld', {
'jsonld': fields.String(example='{}')})
genes_model = api.model('Genes', {
'genes': fields.List(fields.String, example=['BRAF', 'MAP2K1'])})
# Store the arguments by type
int_args = ['poolsize', 'size_cutoff']
float_args = ['score_threshold', 'belief_cutoff']
boolean_args = [
'do_rename', 'use_adeft', 'do_methionine_offset', 'do_orthology_mapping',
'do_isoform_mapping', 'use_cache', 'return_toplevel', 'flatten_evidence',
'normalize_equivalences', 'normalize_opposites', 'invert', 'remove_bound',
'specific_only', 'allow_families', 'match_suffix', 'update_belief']
list_args = [
'gene_list', 'name_list', 'values', 'source_apis', 'uuids', 'curations',
'correct_tags', 'ignores', 'deletions']
dict_args = [
'grounding_map', 'misgrounding_map', 'whitelist', 'mutations']
def _return_stmts(stmts):
if stmts:
stmts_json = stmts_to_json(stmts)
res = {'statements': stmts_json}
else:
res = {'statements': []}
return res
def _stmts_from_proc(proc):
if proc and proc.statements:
stmts = stmts_to_json(proc.statements)
res = {'statements': stmts}
else:
res = {'statements': []}
return res
# Create Resources in Preassembly Namespace
# Manually add preassembly resources not based on assembly corpus functions
pipeline_model = api.inherit('Pipeline', stmts_model, {
'pipeline': fields.List(fields.Nested(dict_model), example=[
{'function': 'filter_grounded_only'},
{'function': 'run_preassembly', 'kwargs': {'return_toplevel': False}}
])
})
# There's an extra blank line between parameters here and in all the following
# docstrings for better visualization in Swagger
@preassembly_ns.expect(pipeline_model)
@preassembly_ns.route('/pipeline')
class RunPipeline(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Run an assembly pipeline for a list of Statements.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to run the pipeline.
pipeline : list[dict]
A list of dictionaries representing steps in the pipeline. Each
step should have a 'function' key and, if appropriate, 'args' and
'kwargs' keys. For more documentation and examples, see
https://indra.readthedocs.io/en/latest/modules/pipeline.html
Returns
-------
statements : list[indra.statements.Statement.to_json()]
The list of INDRA Statements resulting from running the pipeline
on the list of input Statements.
"""
args = request.json
stmts = stmts_from_json(args.get('statements'))
pipeline_steps = args.get('pipeline')
ap = AssemblyPipeline(pipeline_steps)
stmts_out = ap.run(stmts)
return _return_stmts(stmts_out)
# Dynamically generate resources for assembly corpus functions
class PreassembleStatements(Resource):
"""Parent Resource for Preassembly resources."""
func_name = None
def process_args(self, args_json):
for arg in args_json:
if arg == 'stmt_type':
args_json[arg] = get_statement_by_name(args_json[arg])
elif arg in ['matches_fun', 'refinement_fun']:
args_json[arg] = pipeline_functions[args_json[arg]]
elif arg == 'curations':
Curation = namedtuple(
'Curation', ['pa_hash', 'source_hash', 'tag'])
args_json[arg] = [
Curation(cur['pa_hash'], cur['source_hash'], cur['tag'])
for cur in args_json[arg]]
elif arg == 'belief_scorer':
if args_json[arg] == 'wm':
args_json[arg] = get_eidos_scorer()
else:
args_json[arg] = None
elif arg == 'ontology':
if args_json[arg] == 'wm':
args_json[arg] = world_ontology
else:
args_json[arg] = bio_ontology
elif arg == 'whitelist' or arg == 'mutations':
args_json[arg] = {
gene: [tuple(mod) for mod in mods]
for gene, mods in args_json[arg].items()}
return args_json
@api.doc(False)
def options(self):
return {}
def post(self):
args = self.process_args(request.json)
stmts = stmts_from_json(args.pop('statements'))
stmts_out = pipeline_functions[self.func_name](stmts, **args)
return _return_stmts(stmts_out)
def make_preassembly_model(func):
"""Create new Flask model with function arguments."""
args = inspect.signature(func).parameters
# We can reuse Staetments model if only stmts_in or stmts and **kwargs are
# arguments of the function
if ((len(args) == 1 and ('stmts_in' in args or 'stmts' in args)) or
(len(args) == 2 and 'kwargs' in args and
('stmts_in' in args or 'stmts' in args))):
return stmts_model
# Inherit a model if there are other arguments
model_fields = {}
for arg in args:
if arg != 'stmts_in' and arg != 'stmts' and arg != 'kwargs':
default = None
if args[arg].default is not inspect.Parameter.empty:
default = args[arg].default
# Need to use default for boolean and example for other types
if arg in boolean_args:
model_fields[arg] = fields.Boolean(default=default)
elif arg in int_args:
model_fields[arg] = fields.Integer(example=default)
elif arg in float_args:
model_fields[arg] = fields.Float(example=0.7)
elif arg in list_args:
if arg == 'curations':
model_fields[arg] = fields.List(
fields.Nested(dict_model),
example=[{'pa_hash': '1234', 'source_hash': '2345',
'tag': 'wrong_relation'}])
else:
model_fields[arg] = fields.List(
fields.String, example=default)
elif arg in dict_args:
model_fields[arg] = fields.Nested(dict_model)
else:
model_fields[arg] = fields.String(example=default)
new_model = api.inherit(
('%s_input' % func.__name__), stmts_model, model_fields)
return new_model
def update_docstring(func):
doc = func.__doc__
docstring = parse(doc)
new_doc = docstring.short_description + '\n\n'
if docstring.long_description:
new_doc += (docstring.long_description + '\n\n')
new_doc += ('Parameters\n----------\n')
for param in docstring.params:
if param.arg_name in ['save', 'save_unique']:
continue
elif param.arg_name in ['stmts', 'stmts_in']:
param.arg_name = 'statements'
param.type_name = 'list[indra.statements.Statement.to_json()]'
elif param.arg_name == 'belief_scorer':
param.type_name = 'Optional[str] or None'
param.description = (
'Type of BeliefScorer to use in calculating Statement '
'probabilities. If None is provided (default), then the '
'default scorer is used (good for biology use case). '
'For WorldModelers use case belief scorer should be set '
'to "wm".')
elif param.arg_name == 'ontology':
param.type_name = 'Optional[str] or None'
param.description = (
'Type of ontology to use for preassembly ("bio" or "wm"). '
'If None is provided (default), then the bio ontology is used.'
'For WorldModelers use case ontology should be set to "wm".')
elif param.arg_name in ['matches_fun', 'refinement_fun']:
param.type_name = 'str'
elif param.arg_name == 'curations':
param.type_name = 'list[dict]'
param.description = (
'A list of dictionaries representing curations. Each '
'dictionary must have "pa_hash" (preassembled statement hash)'
', "source_hash", (evidence hash) and "tag" (e.g. "correct", '
'"wrong_relation", etc.) keys.')
new_doc += (param.arg_name + ' : ' + param.type_name + '\n' +
param.description + '\n\n')
new_doc += 'Returns\n----------\n'
new_doc += 'statements : list[indra.statements.Statement.to_json()]\n'
new_doc += 'A list of processed INDRA Statements'
return docstring.short_description, new_doc
# Create resources for each of assembly_corpus functions
for func_name, func in pipeline_functions.items():
if func.__module__ == 'indra.tools.assemble_corpus':
doc = ''
short_doc = ''
# Get the function description from docstring
if func.__doc__:
short_doc, doc = update_docstring(func)
new_model = make_preassembly_model(func)
@preassembly_ns.expect(new_model)
@preassembly_ns.route(('/%s' % func_name),
doc={'summary': short_doc})
class NewFunction(PreassembleStatements):
func_name = func_name
def post(self):
return super().post()
post.__doc__ = doc
# Create resources for Sources namespace
# REACH
reach_text_model = api.inherit('ReachText', bio_text_model, {
'offline': fields.Boolean(default=False),
'url': fields.String(example=reach_text_url)
})
reach_json_model = api.model('ReachJSON', {'json': fields.String(example='{}')})
reach_pmc_model = api.model('ReachPMC', {
'pmcid': fields.String(example='PMC3717945'),
'offline': fields.Boolean(default=False),
'url': fields.String(example=reach_nxml_url)
})
@sources_ns.expect(reach_text_model)
@sources_ns.route('/reach/process_text')
class ReachProcessText(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process text with REACH and return INDRA Statements.
Parameters
----------
text : str
The text to be processed.
offline : Optional[bool]
If set to True, the REACH system is run offline via a JAR file.
Otherwise (by default) the web service is called. Default: False
url : Optional[str]
URL for a REACH web service instance, which is used for reading if
provided. If not provided but offline is set to False (its default
value), REACH_TEXT_URL set in configuration will be used. If not
provided in configuration, the Arizona REACH web service is called
(http://agathon.sista.arizona.edu:8080/odinweb/api/help).
Default: None
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
text = args.get('text')
offline = True if args.get('offline') else False
given_url = args.get('url')
config_url = get_config('REACH_TEXT_URL', failure_ok=True)
# Order: URL given as an explicit argument in the request. Then any URL
# set in the configuration. Then, unless offline is set, use the
# default REACH web service URL.
if 'url' in args: # This is to take None if explicitly given
url = given_url
elif config_url:
url = config_url
elif not offline:
url = reach_text_url
else:
url = None
# If a URL is set, prioritize it over the offline setting
if url:
offline = False
rp = reach.process_text(text, offline=offline, url=url)
return _stmts_from_proc(rp)
@sources_ns.expect(reach_json_model)
@sources_ns.route('/reach/process_json')
class ReachProcessJson(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process REACH json and return INDRA Statements.
Parameters
----------
json : str
The json string to be processed.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
json_str = args.get('json')
rp = reach.process_json_str(json_str)
return _stmts_from_proc(rp)
@sources_ns.expect(reach_pmc_model)
@sources_ns.route('/reach/process_pmc')
class ReachProcessPmc(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process PubMedCentral article and return INDRA Statements.
Parameters
----------
pmc_id : str
The ID of a PubmedCentral article. The string may start with PMC
but passing just the ID also works.
Examples: 3717945, PMC3717945
https://www.ncbi.nlm.nih.gov/pmc/
offline : Optional[bool]
If set to True, the REACH system is run offline via a JAR file.
Otherwise (by default) the web service is called. Default: False
url : Optional[str]
URL for a REACH web service instance, which is used for reading if
provided. If not provided but offline is set to False (its default
value), REACH_NXML_URL set in configuration will be used. If not
provided in configuration, the Arizona REACH web service is called
(http://agathon.sista.arizona.edu:8080/odinweb/api/help).
Default: None
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
pmcid = args.get('pmcid')
offline = True if args.get('offline') else False
given_url = args.get('url')
config_url = get_config('REACH_NXML_URL', failure_ok=True)
# Order: URL given as an explicit argument in the request. Then any URL
# set in the configuration. Then, unless offline is set, use the
# default REACH web service URL.
if 'url' in args: # This is to take None if explicitly given
url = given_url
elif config_url:
url = config_url
elif not offline:
url = reach_nxml_url
else:
url = None
# If a URL is set, prioritize it over the offline setting
if url:
offline = False
rp = reach.process_pmc(pmcid, offline=offline, url=url)
return _stmts_from_proc(rp)
# TRIPS
xml_model = api.model('XML', {'xml_str': fields.String})
@sources_ns.expect(bio_text_model)
@sources_ns.route('/trips/process_text')
class TripsProcessText(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process text with TRIPS and return INDRA Statements.
Parameters
----------
text : str
The text to be processed.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
text = args.get('text')
tp = trips.process_text(text)
return _stmts_from_proc(tp)
@sources_ns.expect(xml_model)
@sources_ns.route('/trips/process_xml')
class TripsProcessText(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process TRIPS EKB XML and return INDRA Statements.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
xml_str = args.get('xml_str')
tp = trips.process_xml(xml_str)
return _stmts_from_proc(tp)
# Sofia
text_auth_model = api.inherit('TextAuth', wm_text_model, {
'auth': fields.List(fields.String, example=['USER', 'PASS'])})
# Hide documentation because webservice is unresponsive
@sources_ns.expect(text_auth_model)
@sources_ns.route('/sofia/process_text', doc=False)
class SofiaProcessText(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process text with Sofia and return INDRA Statements.
Parameters
----------
text : str
A string containing the text to be processed with Sofia.
auth : Optional[list]
A username/password pair for the Sofia web service. If not given,
the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either
the INDRA config or the environment.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
text = args.get('text')
auth = args.get('auth')
sp = sofia.process_text(text, auth=auth)
return _stmts_from_proc(sp)
# Eidos
eidos_text_model = api.inherit('EidosText', wm_text_model, {
'webservice': fields.String,
'grounding_ns': fields.String(example='WM')
})
eidos_jsonld_model = api.inherit('EidosJsonld', jsonld_model, {
'grounding_ns': fields.String(example='WM')
})
# Hide docs until webservice is available
@sources_ns.expect(eidos_text_model)
@sources_ns.route('/eidos/process_text', doc=False)
class EidosProcessText(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process text with EIDOS and return INDRA Statements.
Parameters
----------
text : str
The text to be processed.
webservice : Optional[str]
An Eidos reader web service URL to send the request to.
If None, the reading is assumed to be done with the Eidos JAR
rather than via a web service. Default: None
grounding_ns : Optional[list]
A list of name spaces for which INDRA should represent groundings,
when given. If not specified or None, all grounding name spaces are
propagated. If an empty list, no groundings are propagated.
Example: ['UN', 'WM'], Default: None
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
text = args.get('text')
webservice = args.get('webservice')
grounding_ns = args.get('grounding_ns')
if not webservice:
abort(400, 'No web service address provided.')
ep = eidos.process_text(text, webservice=webservice,
grounding_ns=grounding_ns)
return _stmts_from_proc(ep)
@sources_ns.expect(eidos_jsonld_model)
@sources_ns.route('/eidos/process_jsonld')
class EidosProcessJsonld(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process an EIDOS JSON-LD and return INDRA Statements.
Parameters
----------
jsonld : str
The JSON-LD string to be processed.
grounding_ns : Optional[list]
A list of name spaces for which INDRA should represent groundings,
when given. If not specified or None, all grounding name spaces are
propagated. If an empty list, no groundings are propagated.
Example: ['UN', 'WM'], Default: None
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
eidos_json = args.get('jsonld')
grounding_ns = args.get('grounding_ns')
ep = eidos.process_json_str(eidos_json, grounding_ns=grounding_ns)
return _stmts_from_proc(ep)
# Hume
@sources_ns.expect(jsonld_model)
@sources_ns.route('/hume/process_jsonld')
class HumeProcessJsonld(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process Hume JSON-LD and return INDRA Statements.
Parameters
----------
jsonld : str
The JSON-LD string to be processed.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
jsonld_str = args.get('jsonld')
jsonld = json.loads(jsonld_str)
hp = hume.process_jsonld(jsonld)
return _stmts_from_proc(hp)
# CWMS
@sources_ns.expect(wm_text_model)
@sources_ns.route('/cwms/process_text')
class CwmsProcessText(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process text with CWMS and return INDRA Statements.
Parameters
----------
text : str
Text to process
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
text = args.get('text')
cp = cwms.process_text(text)
return _stmts_from_proc(cp)
# BEL
bel_rdf_model = api.model('BelRdf', {'belrdf': fields.String})
@sources_ns.expect(genes_model)
@sources_ns.route('/bel/process_pybel_neighborhood')
class BelProcessNeighborhood(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process BEL Large Corpus neighborhood and return INDRA Statements.
Parameters
----------
genes : list[str]
A list of entity names (e.g., gene names) which will be used as the
basis of filtering the result. If any of the Agents of an extracted
INDRA Statement has a name appearing in this list, the Statement is
retained in the result.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
genes = args.get('genes')
bp = bel.process_pybel_neighborhood(genes)
return _stmts_from_proc(bp)
@sources_ns.expect(bel_rdf_model)
@sources_ns.route('/bel/process_belrdf')
class BelProcessBelRdf(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process BEL RDF and return INDRA Statements.
Parameters
----------
belrdf : str
A BEL/RDF string to be processed. This will usually come from
reading a .rdf file.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
belrdf = args.get('belrdf')
bp = bel.process_belrdf(belrdf)
return _stmts_from_proc(bp)
# BioPax
source_target_model = api.model('SourceTarget', {
'source': fields.List(fields.String, example=['BRAF', 'RAF1', 'ARAF']),
'target': fields.List(fields.String, example=['MAP2K1', 'MAP2K2'])
})
@sources_ns.expect(genes_model)
@sources_ns.route('/biopax/process_pc_pathsbetween')
class BiopaxPathsBetween(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""
Process PathwayCommons paths between genes, return INDRA Statements.
Parameters
----------
genes : list
A list of HGNC gene symbols to search for paths between.
Examples: ['BRAF', 'MAP2K1']
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
genes = args.get('genes')
bp = biopax.process_pc_pathsbetween(genes)
return _stmts_from_proc(bp)
@sources_ns.expect(source_target_model)
@sources_ns.route('/biopax/process_pc_pathsfromto')
class BiopaxPathsFromTo(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""
Process PathwayCommons paths from-to genes, return INDRA Statements.
Parameters
----------
source : list
A list of HGNC gene symbols that are the sources of paths being
searched for.
Examples: ['BRAF', 'RAF1', 'ARAF']
target : list
A list of HGNC gene symbols that are the targets of paths being
searched for.
Examples: ['MAP2K1', 'MAP2K2']
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
source = args.get('source')
target = args.get('target')
bp = biopax.process_pc_pathsfromto(source, target)
return _stmts_from_proc(bp)
@sources_ns.expect(genes_model)
@sources_ns.route('/biopax/process_pc_neighborhood')
class BiopaxNeighborhood(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Process PathwayCommons neighborhood, return INDRA Statements.
Parameters
----------
genes : list
A list of HGNC gene symbols to search the neighborhood of.
Examples: ['BRAF'], ['BRAF', 'MAP2K1']
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of extracted INDRA Statements.
"""
args = request.json
genes = args.get('genes')
bp = biopax.process_pc_neighborhood(genes)
return _stmts_from_proc(bp)
# Create resources for Assemblers namespace
pysb_stmts_model = api.inherit('PysbStatements', stmts_model, {
'export_format': fields.String(example='kappa')
})
@assemblers_ns.expect(pysb_stmts_model)
@assemblers_ns.route('/pysb')
class AssemblePysb(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Assemble INDRA Statements and return PySB model string.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
export_format : str
The format to export into, for instance "kappa", "bngl",
"sbml", "matlab", "mathematica", "potterswheel". See
http://pysb.readthedocs.io/en/latest/modules/export/index.html
for a list of supported formats. In addition to the formats
supported by PySB itself, this method also provides "sbgn"
output.
Returns
-------
image or model
Assembled exported model. If export_format is kappa_im or kappa_cm,
image is returned. Otherwise model string is returned.
"""
args = request.json
stmts_json = args.get('statements')
export_format = args.get('export_format')
stmts = stmts_from_json(stmts_json)
pa = PysbAssembler()
pa.add_statements(stmts)
pa.make_model()
try:
for m in pa.model.monomers:
pysb_assembler.set_extended_initial_condition(pa.model, m, 0)
except Exception as e:
logger.exception(e)
if not export_format:
model_str = pa.print_model()
elif export_format in ('kappa_im', 'kappa_cm'):
fname = 'model_%s.png' % export_format
root = os.path.dirname(os.path.abspath(fname))
graph = pa.export_model(format=export_format, file_name=fname)
with open(fname, 'rb') as fh:
data = 'data:image/png;base64,%s' % \
base64.b64encode(fh.read()).decode()
return {'image': data}
else:
try:
model_str = pa.export_model(format=export_format)
except Exception as e:
logger.exception(e)
model_str = ''
res = {'model': model_str}
return res
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/cx')
class AssembleCx(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Assemble INDRA Statements and return CX network json.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
Returns
-------
model
Assembled model string.
"""
args = request.json
stmts_json = args.get('statements')
stmts = stmts_from_json(stmts_json)
ca = CxAssembler(stmts)
model_str = ca.make_model()
res = {'model': model_str}
return res
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/graph')
class AssembleGraph(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Assemble INDRA Statements and return Graphviz graph dot string.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
Returns
-------
model
Assembled model string.
"""
args = request.json
stmts_json = args.get('statements')
stmts = stmts_from_json(stmts_json)
ga = GraphAssembler(stmts)
model_str = ga.make_model()
res = {'model': model_str}
return res
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/cyjs')
class AssembleCyjs(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Assemble INDRA Statements and return Cytoscape JS network.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
Returns
-------
json_model : dict
Json dictionary containing graph information.
"""
args = request.json
stmts_json = args.get('statements')
stmts = stmts_from_json(stmts_json)
cja = CyJSAssembler(stmts)
cja.make_model(grouping=True)
model_str = cja.print_cyjs_graph()
return json.loads(model_str)
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/english')
class AssembleEnglish(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Assemble each statement into English sentence.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
Returns
-------
sentences : dict
Dictionary mapping Statement UUIDs with English sentences.
"""
args = request.json
stmts_json = args.get('statements')
stmts = stmts_from_json(stmts_json)
sentences = {}
for st in stmts:
enga = EnglishAssembler()
enga.add_statements([st])
model_str = enga.make_model()
sentences[st.uuid] = model_str
res = {'sentences': sentences}
return res
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/sif/loopy')
class AssembleLoopy(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Assemble INDRA Statements into a Loopy model using SIF Assembler.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
Returns
-------
loopy_url : str
Assembled Loopy model string.
"""
args = request.json
stmts_json = args.get('statements')
stmts = stmts_from_json(stmts_json)
sa = SifAssembler(stmts)
sa.make_model(use_name_as_key=True)
model_str = sa.print_loopy(as_url=True)
res = {'loopy_url': model_str}
return res
# Create resources for NDEx namespace
network_model = api.model('Network', {'network_id': fields.String})
@ndex_ns.expect(stmts_model)
@ndex_ns.route('/share_model_ndex')
class ShareModelNdex(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Upload the model to NDEX.
Parameters
----------
statements : list[indra.statements.Statement.to_json()]
A list of INDRA Statements to assemble.
Returns
-------
network_id : str
ID of uploaded NDEx network.
"""
args = request.json
stmts_json = args.get('statements')
stmts = stmts_from_json(stmts_json)
ca = CxAssembler(stmts)
for n, v in args.items():
ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'})
ca.make_model()
network_id = ca.upload_model(private=False)
return {'network_id': network_id}
@ndex_ns.expect(network_model)
@ndex_ns.route('/fetch_model_ndex')
class FetchModelNdex(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Download model and associated pieces from NDEX.
Parameters
----------
network_id : str
ID of NDEx network to fetch.
Returns
-------
stored_data : dict
Dictionary representing the network.
"""
args = request.json
network_id = args.get('network_id')
cx = process_ndex_network(network_id)
network_attr = [x for x in cx.cx if x.get('networkAttributes')]
network_attr = network_attr[0]['networkAttributes']
keep_keys = ['txt_input', 'parser',
'model_elements', 'preset_pos', 'stmts',
'sentences', 'evidence', 'cell_line', 'mrna', 'mutations']
stored_data = {}
for d in network_attr:
if d['n'] in keep_keys:
stored_data[d['n']] = d['v']
return stored_data
# Create resources for INDRA DB REST namespace
stmt_model = api.model('Statement', {'statement': fields.Nested(dict_model)})
@indra_db_rest_ns.expect(stmt_model)
@indra_db_rest_ns.route('/get_evidence')
class GetEvidence(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Get all evidence for a given INDRA statement.
Parameters
----------
statements : indra.statements.Statement.to_json()
An INDRA Statement to get evidence for.
Returns
-------
statements : list[indra.statements.Statement.to_json()]
A list of retrieved INDRA Statements with evidence.
"""
args = request.json
stmt_json = args.get('statement')
stmt = Statement._from_json(stmt_json)
def _get_agent_ref(agent):
"""Get the preferred ref for an agent for db web api."""
if agent is None:
return None
ag_hgnc_id = hgnc_client.get_hgnc_id(agent.name)
if ag_hgnc_id is not None:
return ag_hgnc_id + "@HGNC"
db_refs = agent.db_refs
for namespace in ['HGNC', 'FPLX', 'CHEBI', 'TEXT']:
if namespace in db_refs.keys():
return '%s@%s' % (db_refs[namespace], namespace)
return '%s@%s' % (agent.name, 'TEXT')
def _get_matching_stmts(stmt_ref):
# Filter by statement type.
stmt_type = stmt_ref.__class__.__name__
agent_name_list = [
_get_agent_ref(ag) for ag in stmt_ref.agent_list()]
non_binary_statements = (Complex, SelfModification, ActiveForm)
# TODO: We should look at more than just the agent name.
# Doing so efficiently may require changes to the web api.
if isinstance(stmt_ref, non_binary_statements):
agent_list = [ag_name for ag_name in agent_name_list
if ag_name is not None]
kwargs = {}
else:
agent_list = []
kwargs = {k: v for k, v in zip(['subject', 'object'],
agent_name_list)}
if not any(kwargs.values()):
return []
print(agent_list)
stmts = get_statements(agents=agent_list, stmt_type=stmt_type,
simple_response=True, **kwargs)
return stmts
stmts_out = _get_matching_stmts(stmt)
agent_name_list = [ag.name for ag in stmt.agent_list()]
stmts_out = stmts = filter_concept_names(
stmts_out, agent_name_list, 'all')
return _return_stmts(stmts_out)
# Create resources for Databases namespace
cbio_model = api.model('Cbio', {
'gene_list': fields.List(fields.String, example=["FOSL1", "GRB2"]),
'cell_lines': fields.List(fields.String, example=['COLO679_SKIN'])
})
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_mrna')
class CbioMrna(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Get CCLE mRNA amounts using cBioClient
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mRNA amounts for.
cell_lines : list[str]
A list of CCLE cell line names to get mRNA amounts for.
Returns
-------
mrna_amounts : dict[dict[float]]
A dict keyed to cell lines containing a dict keyed to genes
containing float
"""
args = request.json
gene_list = args.get('gene_list')
cell_lines = args.get('cell_lines')
mrna_amounts = cbio_client.get_ccle_mrna(gene_list, cell_lines)
res = {'mrna_amounts': mrna_amounts}
return res
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_cna')
class CbioCna(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Get CCLE CNA
-2 = homozygous deletion
-1 = hemizygous deletion
0 = neutral / no change
1 = gain
2 = high level amplification
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in.
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
Returns
-------
cna : dict[dict[int]]
A dict keyed to cases containing a dict keyed to genes
containing int
"""
args = request.json
gene_list = args.get('gene_list')
cell_lines = args.get('cell_lines')
cna = cbio_client.get_ccle_cna(gene_list, cell_lines)
res = {'cna': cna}
return res
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_mutations')
class CbioMutations(Resource):
@api.doc(False)
def options(self):
return {}
def post(self):
"""Get CCLE mutations
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
Returns
-------
mutations : dict
The result from cBioPortal as a dict in the format
{cell_line : {gene : [mutation1, mutation2, ...] }}
"""
args = request.json
gene_list = args.get('gene_list')
cell_lines = args.get('cell_lines')
mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines)
res = {'mutations': mutations}
return res
if __name__ == '__main__':
argparser = argparse.ArgumentParser('Run the INDRA REST API')
argparser.add_argument('--host', default='0.0.0.0')
argparser.add_argument('--port', default=8080, type=int)
argparserargs = argparser.parse_args()
app.run(host=argparserargs.host, port=argparserargs.port)
| {
"content_hash": "548142db16f34960cf882a008c7bcb66",
"timestamp": "",
"source": "github",
"line_count": 1332,
"max_line_length": 80,
"avg_line_length": 32.7957957957958,
"alnum_prop": 0.5870341543814669,
"repo_name": "johnbachman/belpy",
"id": "b50fefcc5f01d02d53e7d2f542a89a838d44f20c",
"size": "43684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_api/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "318177"
},
{
"name": "Ruby",
"bytes": "433"
},
{
"name": "Shell",
"bytes": "430"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sitio', '0025_auto_20170612_2133'),
]
operations = [
migrations.RemoveField(
model_name='comentariosdenunciados',
name='url_comentario',
),
migrations.RemoveField(
model_name='itinerariosdenunciados',
name='url_itinerario',
),
]
| {
"content_hash": "541d74b966acecc75afbc1357e545334",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 48,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.5906183368869936,
"repo_name": "giocastagno/I.W._Delpuppo_Kopech_Castagno",
"id": "2947590e938a6e1b539e0bdb97295197c4ed9cf4",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turismo/sitio/migrations/0026_auto_20170612_2208.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3777"
},
{
"name": "HTML",
"bytes": "34674"
},
{
"name": "JavaScript",
"bytes": "1500"
},
{
"name": "Python",
"bytes": "119953"
},
{
"name": "Shell",
"bytes": "44470"
}
],
"symlink_target": ""
} |
import flatbuffers
# an example documentation comment: monster object
class Monster(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsMonster(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Monster()
x.Init(buf, n + offset)
return x
@classmethod
def MonsterBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# Monster
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Monster
def Pos(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = o + self._tab.Pos
from .Vec3 import Vec3
obj = Vec3()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Mana(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 150
# Monster
def Hp(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 100
# Monster
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Monster
def Inventory(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def InventoryAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def InventoryLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 8
# Monster
def TestType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def Test(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def Test4(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
from .Test import Test
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Test4Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Testarrayofstring(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Monster
def TestarrayofstringLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.VectorLen(o)
return 0
# an example documentation comment: this will end up in the generated code
# multiline too
# Monster
def Testarrayoftables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .Monster import Monster
obj = Monster()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def TestarrayoftablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Enemy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .Monster import Monster
obj = Monster()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Testnestedflatbuffer(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestnestedflatbufferAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def TestnestedflatbufferLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Testempty(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .Stat import Stat
obj = Stat()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Testbool(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Monster
def Testhashs32Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu32Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(38))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs64Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(40))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu64Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(42))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs32Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(44))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu32Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(46))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs64Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(48))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu64Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(50))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def Testarrayofbools(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestarrayofboolsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# Monster
def TestarrayofboolsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Testf(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(54))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 3.14159
# Monster
def Testf2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(56))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 3.0
# Monster
def Testf3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(58))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# Monster
def Testarrayofstring2(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Monster
def Testarrayofstring2Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Testarrayofsortedstruct(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 8
from .Ability import Ability
obj = Ability()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def TestarrayofsortedstructLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Flex(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def FlexAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def FlexLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Test5(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
from .Test import Test
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Test5Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfLongs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfLongsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# Monster
def VectorOfLongsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfDoubles(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfDoublesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
# Monster
def VectorOfDoublesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def ParentNamespaceTest(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(72))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .InParentNamespace import InParentNamespace
obj = InParentNamespace()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfReferrables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .Referrable import Referrable
obj = Referrable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfReferrablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def SingleWeakReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(76))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfWeakReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfWeakReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfWeakReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfStrongReferrables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .Referrable import Referrable
obj = Referrable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfStrongReferrablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def CoOwningReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(82))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfCoOwningReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfCoOwningReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfCoOwningReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def NonOwningReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(86))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfNonOwningReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfNonOwningReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfNonOwningReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def AnyUniqueType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(90))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def AnyUnique(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(92))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def AnyAmbiguousType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(94))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def AnyAmbiguous(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(96))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def VectorOfEnums(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def VectorOfEnumsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def VectorOfEnumsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
return self._tab.VectorLen(o)
return 0
def MonsterStart(builder): builder.StartObject(48)
def MonsterAddPos(builder, pos): builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pos), 0)
def MonsterAddMana(builder, mana): builder.PrependInt16Slot(1, mana, 150)
def MonsterAddHp(builder, hp): builder.PrependInt16Slot(2, hp, 100)
def MonsterAddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def MonsterAddInventory(builder, inventory): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(inventory), 0)
def MonsterStartInventoryVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterAddColor(builder, color): builder.PrependUint8Slot(6, color, 8)
def MonsterAddTestType(builder, testType): builder.PrependUint8Slot(7, testType, 0)
def MonsterAddTest(builder, test): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(test), 0)
def MonsterAddTest4(builder, test4): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(test4), 0)
def MonsterStartTest4Vector(builder, numElems): return builder.StartVector(4, numElems, 2)
def MonsterAddTestarrayofstring(builder, testarrayofstring): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofstring), 0)
def MonsterStartTestarrayofstringVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterAddTestarrayoftables(builder, testarrayoftables): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayoftables), 0)
def MonsterStartTestarrayoftablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterAddEnemy(builder, enemy): builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(enemy), 0)
def MonsterAddTestnestedflatbuffer(builder, testnestedflatbuffer): builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(testnestedflatbuffer), 0)
def MonsterStartTestnestedflatbufferVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterAddTestempty(builder, testempty): builder.PrependUOffsetTRelativeSlot(14, flatbuffers.number_types.UOffsetTFlags.py_type(testempty), 0)
def MonsterAddTestbool(builder, testbool): builder.PrependBoolSlot(15, testbool, 0)
def MonsterAddTesthashs32Fnv1(builder, testhashs32Fnv1): builder.PrependInt32Slot(16, testhashs32Fnv1, 0)
def MonsterAddTesthashu32Fnv1(builder, testhashu32Fnv1): builder.PrependUint32Slot(17, testhashu32Fnv1, 0)
def MonsterAddTesthashs64Fnv1(builder, testhashs64Fnv1): builder.PrependInt64Slot(18, testhashs64Fnv1, 0)
def MonsterAddTesthashu64Fnv1(builder, testhashu64Fnv1): builder.PrependUint64Slot(19, testhashu64Fnv1, 0)
def MonsterAddTesthashs32Fnv1a(builder, testhashs32Fnv1a): builder.PrependInt32Slot(20, testhashs32Fnv1a, 0)
def MonsterAddTesthashu32Fnv1a(builder, testhashu32Fnv1a): builder.PrependUint32Slot(21, testhashu32Fnv1a, 0)
def MonsterAddTesthashs64Fnv1a(builder, testhashs64Fnv1a): builder.PrependInt64Slot(22, testhashs64Fnv1a, 0)
def MonsterAddTesthashu64Fnv1a(builder, testhashu64Fnv1a): builder.PrependUint64Slot(23, testhashu64Fnv1a, 0)
def MonsterAddTestarrayofbools(builder, testarrayofbools): builder.PrependUOffsetTRelativeSlot(24, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofbools), 0)
def MonsterStartTestarrayofboolsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterAddTestf(builder, testf): builder.PrependFloat32Slot(25, testf, 3.14159)
def MonsterAddTestf2(builder, testf2): builder.PrependFloat32Slot(26, testf2, 3.0)
def MonsterAddTestf3(builder, testf3): builder.PrependFloat32Slot(27, testf3, 0.0)
def MonsterAddTestarrayofstring2(builder, testarrayofstring2): builder.PrependUOffsetTRelativeSlot(28, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofstring2), 0)
def MonsterStartTestarrayofstring2Vector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterAddTestarrayofsortedstruct(builder, testarrayofsortedstruct): builder.PrependUOffsetTRelativeSlot(29, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofsortedstruct), 0)
def MonsterStartTestarrayofsortedstructVector(builder, numElems): return builder.StartVector(8, numElems, 4)
def MonsterAddFlex(builder, flex): builder.PrependUOffsetTRelativeSlot(30, flatbuffers.number_types.UOffsetTFlags.py_type(flex), 0)
def MonsterStartFlexVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterAddTest5(builder, test5): builder.PrependUOffsetTRelativeSlot(31, flatbuffers.number_types.UOffsetTFlags.py_type(test5), 0)
def MonsterStartTest5Vector(builder, numElems): return builder.StartVector(4, numElems, 2)
def MonsterAddVectorOfLongs(builder, vectorOfLongs): builder.PrependUOffsetTRelativeSlot(32, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfLongs), 0)
def MonsterStartVectorOfLongsVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterAddVectorOfDoubles(builder, vectorOfDoubles): builder.PrependUOffsetTRelativeSlot(33, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfDoubles), 0)
def MonsterStartVectorOfDoublesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterAddParentNamespaceTest(builder, parentNamespaceTest): builder.PrependUOffsetTRelativeSlot(34, flatbuffers.number_types.UOffsetTFlags.py_type(parentNamespaceTest), 0)
def MonsterAddVectorOfReferrables(builder, vectorOfReferrables): builder.PrependUOffsetTRelativeSlot(35, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfReferrables), 0)
def MonsterStartVectorOfReferrablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterAddSingleWeakReference(builder, singleWeakReference): builder.PrependUint64Slot(36, singleWeakReference, 0)
def MonsterAddVectorOfWeakReferences(builder, vectorOfWeakReferences): builder.PrependUOffsetTRelativeSlot(37, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfWeakReferences), 0)
def MonsterStartVectorOfWeakReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterAddVectorOfStrongReferrables(builder, vectorOfStrongReferrables): builder.PrependUOffsetTRelativeSlot(38, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfStrongReferrables), 0)
def MonsterStartVectorOfStrongReferrablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterAddCoOwningReference(builder, coOwningReference): builder.PrependUint64Slot(39, coOwningReference, 0)
def MonsterAddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences): builder.PrependUOffsetTRelativeSlot(40, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfCoOwningReferences), 0)
def MonsterStartVectorOfCoOwningReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterAddNonOwningReference(builder, nonOwningReference): builder.PrependUint64Slot(41, nonOwningReference, 0)
def MonsterAddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences): builder.PrependUOffsetTRelativeSlot(42, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfNonOwningReferences), 0)
def MonsterStartVectorOfNonOwningReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterAddAnyUniqueType(builder, anyUniqueType): builder.PrependUint8Slot(43, anyUniqueType, 0)
def MonsterAddAnyUnique(builder, anyUnique): builder.PrependUOffsetTRelativeSlot(44, flatbuffers.number_types.UOffsetTFlags.py_type(anyUnique), 0)
def MonsterAddAnyAmbiguousType(builder, anyAmbiguousType): builder.PrependUint8Slot(45, anyAmbiguousType, 0)
def MonsterAddAnyAmbiguous(builder, anyAmbiguous): builder.PrependUOffsetTRelativeSlot(46, flatbuffers.number_types.UOffsetTFlags.py_type(anyAmbiguous), 0)
def MonsterAddVectorOfEnums(builder, vectorOfEnums): builder.PrependUOffsetTRelativeSlot(47, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfEnums), 0)
def MonsterStartVectorOfEnumsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterEnd(builder): return builder.EndObject()
| {
"content_hash": "a07230302026c2660eafca814dece2ac",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 200,
"avg_line_length": 43.198540145985405,
"alnum_prop": 0.6712851880639383,
"repo_name": "bjtaylor1/osrm-backend",
"id": "5baf64d44a7bfded5815a5a784c197faca49cef4",
"size": "29683",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "third_party/flatbuffers/tests/MyGame/Example/Monster.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "C++",
"bytes": "3742822"
},
{
"name": "CMake",
"bytes": "112846"
},
{
"name": "Dockerfile",
"bytes": "2321"
},
{
"name": "Gherkin",
"bytes": "1318554"
},
{
"name": "JavaScript",
"bytes": "366248"
},
{
"name": "Lua",
"bytes": "176445"
},
{
"name": "Makefile",
"bytes": "3170"
},
{
"name": "Python",
"bytes": "22321"
},
{
"name": "Shell",
"bytes": "14233"
}
],
"symlink_target": ""
} |
from google.cloud import compute_v1
# <INGREDIENT create_custom_shared_core_instance>
def create_custom_shared_core_instance(
project_id: str,
zone: str,
instance_name: str,
cpu_series: CustomMachineType.CPUSeries,
memory: int,
) -> compute_v1.Instance:
"""
Create a new VM instance with a custom type using shared CPUs.
Args:
project_id: project ID or project number of the Cloud project you want to use.
zone: name of the zone to create the instance in. For example: "us-west3-b"
instance_name: name of the new virtual machine (VM) instance.
cpu_series: the type of CPU you want to use. Pick one value from the CustomMachineType.CPUSeries enum.
For example: CustomMachineType.CPUSeries.E2_MICRO
memory: the amount of memory for the VM instance, in megabytes.
Return:
Instance object.
"""
assert cpu_series in (
CustomMachineType.CPUSeries.E2_MICRO,
CustomMachineType.CPUSeries.E2_SMALL,
CustomMachineType.CPUSeries.E2_MEDIUM,
)
custom_type = CustomMachineType(zone, cpu_series, memory)
newest_debian = get_image_from_family(
project="debian-cloud", family="debian-10"
)
disk_type = f"zones/{zone}/diskTypes/pd-standard"
disks = [disk_from_image(disk_type, 10, True, newest_debian.self_link)]
return create_instance(project_id, zone, instance_name, disks, str(custom_type))
# </INGREDIENT>
| {
"content_hash": "2e5b4686129347df652c92f2e12cdc46",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 110,
"avg_line_length": 36.525,
"alnum_prop": 0.6844626967830253,
"repo_name": "googleapis/python-compute",
"id": "a29193438ff93a0587499c9ed368a61e11653aee",
"size": "2310",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "samples/ingredients/instances/custom_machine_types/create_shared_with_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from .base.errors import *
| {
"content_hash": "cdad3f86bdbf05c0a58a5504338242a9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.7407407407407407,
"repo_name": "DataDog/integrations-core",
"id": "a1e6700230b6ddca6f63ce66a0baf6fadc7f95cd",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog_checks_base/datadog_checks/errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import datetime
from six import moves
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class ListServersNegativeV3Test(base.BaseV3ComputeTest):
force_tenant_isolation = True
@classmethod
@test.safe_setup
def setUpClass(cls):
super(ListServersNegativeV3Test, cls).setUpClass()
cls.client = cls.servers_client
# The following servers are created for use
# by the test methods in this class. These
# servers are cleaned up automatically in the
# tearDownClass method of the super-class.
cls.existing_fixtures = []
cls.deleted_fixtures = []
cls.start_time = datetime.datetime.utcnow()
for x in moves.xrange(2):
resp, srv = cls.create_test_server(wait_until='ACTIVE')
cls.existing_fixtures.append(srv)
resp, srv = cls.create_test_server()
cls.client.delete_server(srv['id'])
# We ignore errors on termination because the server may
# be put into ERROR status on a quick spawn, then delete,
# as the compute node expects the instance local status
# to be spawning, not deleted. See LP Bug#1061167
cls.client.wait_for_server_termination(srv['id'],
ignore_error=True)
cls.deleted_fixtures.append(srv)
@test.attr(type=['negative', 'gate'])
def test_list_servers_with_a_deleted_server(self):
# Verify deleted servers do not show by default in list servers
# List servers and verify server not returned
resp, body = self.client.list_servers()
servers = body['servers']
deleted_ids = [s['id'] for s in self.deleted_fixtures]
actual = [srv for srv in servers
if srv['id'] in deleted_ids]
self.assertEqual('200', resp['status'])
self.assertEqual([], actual)
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_image(self):
# Listing servers for a non existing image returns empty list
non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
resp, body = self.client.list_servers(dict(image=non_existing_image))
servers = body['servers']
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_flavor(self):
# Listing servers by non existing flavor returns empty list
non_existing_flavor = 1234
resp, body = self.client.list_servers(dict(flavor=non_existing_flavor))
servers = body['servers']
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_server_name(self):
# Listing servers for a non existent server name returns empty list
non_existing_name = 'junk_server_1234'
resp, body = self.client.list_servers(dict(name=non_existing_name))
servers = body['servers']
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
@test.attr(type=['negative', 'gate'])
def test_list_servers_status_non_existing(self):
# Return an empty list when invalid status is specified
non_existing_status = 'BALONEY'
resp, body = self.client.list_servers(dict(status=non_existing_status))
servers = body['servers']
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
@test.attr(type='gate')
def test_list_servers_by_limits(self):
# List servers by specifying limits
resp, body = self.client.list_servers({'limit': 1})
self.assertEqual('200', resp['status'])
self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_limits_greater_than_actual_count(self):
# List servers by specifying a greater value for limit
resp, body = self.client.list_servers({'limit': 100})
self.assertEqual('200', resp['status'])
self.assertEqual(len(self.existing_fixtures), len(body['servers']))
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_string(self):
# Return an error if a string value is passed for limit
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': 'testing'})
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_negative_value(self):
# Return an error if a negative value for limit is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': -1})
@test.attr(type='gate')
def test_list_servers_by_changes_since(self):
# Servers are listed by specifying changes-since date
changes_since = {'changes_since': self.start_time.isoformat()}
resp, body = self.client.list_servers(changes_since)
self.assertEqual('200', resp['status'])
# changes-since returns all instances, including deleted.
num_expected = (len(self.existing_fixtures) +
len(self.deleted_fixtures))
self.assertEqual(num_expected, len(body['servers']),
"Number of servers %d is wrong in %s" %
(num_expected, body['servers']))
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_invalid_date(self):
# Return an error when invalid date format is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'changes_since': '2011/01/01'})
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_future_date(self):
# Return an empty list when a date in the future is passed
changes_since = {'changes_since': '2051-01-01T12:34:00Z'}
resp, body = self.client.list_servers(changes_since)
self.assertEqual('200', resp['status'])
self.assertEqual(0, len(body['servers']))
@test.attr(type=['negative', 'gate'])
def test_list_servers_detail_server_is_deleted(self):
# Server details are not listed for a deleted server
deleted_ids = [s['id'] for s in self.deleted_fixtures]
resp, body = self.client.list_servers_with_detail()
servers = body['servers']
actual = [srv for srv in servers
if srv['id'] in deleted_ids]
self.assertEqual('200', resp['status'])
self.assertEqual([], actual)
| {
"content_hash": "2b7cdfb7f5222ae83d4610b311077b12",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 79,
"avg_line_length": 44.513333333333335,
"alnum_prop": 0.6305226898307623,
"repo_name": "Mirantis/tempest",
"id": "18e5c677c3aad5f14661af423d3d853d625f9ffc",
"size": "7313",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/compute/v3/servers/test_list_servers_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
} |
import itertools
import re
JNINativeInterface = """
test (JNICALL *foo)(int a, int* b);
jint (JNICALL *GetVersion)(JNIEnv *env);
jclass (JNICALL *DefineClass)
(JNIEnv *env, const char *name, jobject loader, const jbyte *buf,
jsize len);
jclass (JNICALL *FindClass)
(JNIEnv *env, const char *name);
jmethodID (JNICALL *FromReflectedMethod)
(JNIEnv *env, jobject method);
jfieldID (JNICALL *FromReflectedField)
(JNIEnv *env, jobject field);
jobject (JNICALL *ToReflectedMethod)
(JNIEnv *env, jclass cls, jmethodID methodID, jboolean isStatic);
jclass (JNICALL *GetSuperclass)
(JNIEnv *env, jclass sub);
jboolean (JNICALL *IsAssignableFrom)
(JNIEnv *env, jclass sub, jclass sup);
jobject (JNICALL *ToReflectedField)
(JNIEnv *env, jclass cls, jfieldID fieldID, jboolean isStatic);
jint (JNICALL *Throw)
(JNIEnv *env, jthrowable obj);
jint (JNICALL *ThrowNew)
(JNIEnv *env, jclass clazz, const char *msg);
jthrowable (JNICALL *ExceptionOccurred)
(JNIEnv *env);
void (JNICALL *ExceptionDescribe)
(JNIEnv *env);
void (JNICALL *ExceptionClear)
(JNIEnv *env);
void (JNICALL *FatalError)
(JNIEnv *env, const char *msg);
jint (JNICALL *PushLocalFrame)
(JNIEnv *env, jint capacity);
jobject (JNICALL *PopLocalFrame)
(JNIEnv *env, jobject result);
jobject (JNICALL *NewGlobalRef)
(JNIEnv *env, jobject lobj);
void (JNICALL *DeleteGlobalRef)
(JNIEnv *env, jobject gref);
void (JNICALL *DeleteLocalRef)
(JNIEnv *env, jobject obj);
jboolean (JNICALL *IsSameObject)
(JNIEnv *env, jobject obj1, jobject obj2);
jobject (JNICALL *NewLocalRef)
(JNIEnv *env, jobject ref);
jint (JNICALL *EnsureLocalCapacity)
(JNIEnv *env, jint capacity);
jobject (JNICALL *AllocObject)
(JNIEnv *env, jclass clazz);
jobject (JNICALL *NewObject)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jobject (JNICALL *NewObjectV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jobject (JNICALL *NewObjectA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jclass (JNICALL *GetObjectClass)
(JNIEnv *env, jobject obj);
jboolean (JNICALL *IsInstanceOf)
(JNIEnv *env, jobject obj, jclass clazz);
jmethodID (JNICALL *GetMethodID)
(JNIEnv *env, jclass clazz, const char *name, const char *sig);
jobject (JNICALL *CallObjectMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jobject (JNICALL *CallObjectMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jobject (JNICALL *CallObjectMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args);
jboolean (JNICALL *CallBooleanMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jboolean (JNICALL *CallBooleanMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jboolean (JNICALL *CallBooleanMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args);
jbyte (JNICALL *CallByteMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jbyte (JNICALL *CallByteMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jbyte (JNICALL *CallByteMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
jchar (JNICALL *CallCharMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jchar (JNICALL *CallCharMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jchar (JNICALL *CallCharMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
jshort (JNICALL *CallShortMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jshort (JNICALL *CallShortMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jshort (JNICALL *CallShortMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
jint (JNICALL *CallIntMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jint (JNICALL *CallIntMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jint (JNICALL *CallIntMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
jlong (JNICALL *CallLongMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jlong (JNICALL *CallLongMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jlong (JNICALL *CallLongMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
jfloat (JNICALL *CallFloatMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jfloat (JNICALL *CallFloatMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jfloat (JNICALL *CallFloatMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
jdouble (JNICALL *CallDoubleMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
jdouble (JNICALL *CallDoubleMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
jdouble (JNICALL *CallDoubleMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
void (JNICALL *CallVoidMethod)
(JNIEnv *env, jobject obj, jmethodID methodID, ...);
void (JNICALL *CallVoidMethodV)
(JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
void (JNICALL *CallVoidMethodA)
(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args);
jobject (JNICALL *CallNonvirtualObjectMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jobject (JNICALL *CallNonvirtualObjectMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jobject (JNICALL *CallNonvirtualObjectMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue * args);
jboolean (JNICALL *CallNonvirtualBooleanMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jboolean (JNICALL *CallNonvirtualBooleanMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jboolean (JNICALL *CallNonvirtualBooleanMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue * args);
jbyte (JNICALL *CallNonvirtualByteMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jbyte (JNICALL *CallNonvirtualByteMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jbyte (JNICALL *CallNonvirtualByteMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
jchar (JNICALL *CallNonvirtualCharMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jchar (JNICALL *CallNonvirtualCharMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jchar (JNICALL *CallNonvirtualCharMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
jshort (JNICALL *CallNonvirtualShortMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jshort (JNICALL *CallNonvirtualShortMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jshort (JNICALL *CallNonvirtualShortMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
jint (JNICALL *CallNonvirtualIntMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jint (JNICALL *CallNonvirtualIntMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jint (JNICALL *CallNonvirtualIntMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
jlong (JNICALL *CallNonvirtualLongMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jlong (JNICALL *CallNonvirtualLongMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jlong (JNICALL *CallNonvirtualLongMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
jfloat (JNICALL *CallNonvirtualFloatMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jfloat (JNICALL *CallNonvirtualFloatMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jfloat (JNICALL *CallNonvirtualFloatMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
jdouble (JNICALL *CallNonvirtualDoubleMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
jdouble (JNICALL *CallNonvirtualDoubleMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
jdouble (JNICALL *CallNonvirtualDoubleMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue *args);
void (JNICALL *CallNonvirtualVoidMethod)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
void (JNICALL *CallNonvirtualVoidMethodV)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
va_list args);
void (JNICALL *CallNonvirtualVoidMethodA)
(JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
const jvalue * args);
jfieldID (JNICALL *GetFieldID)
(JNIEnv *env, jclass clazz, const char *name, const char *sig);
jobject (JNICALL *GetObjectField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jboolean (JNICALL *GetBooleanField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jbyte (JNICALL *GetByteField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jchar (JNICALL *GetCharField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jshort (JNICALL *GetShortField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jint (JNICALL *GetIntField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jlong (JNICALL *GetLongField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jfloat (JNICALL *GetFloatField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
jdouble (JNICALL *GetDoubleField)
(JNIEnv *env, jobject obj, jfieldID fieldID);
void (JNICALL *SetObjectField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jobject val);
void (JNICALL *SetBooleanField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jboolean val);
void (JNICALL *SetByteField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jbyte val);
void (JNICALL *SetCharField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jchar val);
void (JNICALL *SetShortField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jshort val);
void (JNICALL *SetIntField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jint val);
void (JNICALL *SetLongField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jlong val);
void (JNICALL *SetFloatField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jfloat val);
void (JNICALL *SetDoubleField)
(JNIEnv *env, jobject obj, jfieldID fieldID, jdouble val);
jmethodID (JNICALL *GetStaticMethodID)
(JNIEnv *env, jclass clazz, const char *name, const char *sig);
jobject (JNICALL *CallStaticObjectMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jobject (JNICALL *CallStaticObjectMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jobject (JNICALL *CallStaticObjectMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jboolean (JNICALL *CallStaticBooleanMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jboolean (JNICALL *CallStaticBooleanMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jboolean (JNICALL *CallStaticBooleanMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jbyte (JNICALL *CallStaticByteMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jbyte (JNICALL *CallStaticByteMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jbyte (JNICALL *CallStaticByteMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jchar (JNICALL *CallStaticCharMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jchar (JNICALL *CallStaticCharMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jchar (JNICALL *CallStaticCharMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jshort (JNICALL *CallStaticShortMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jshort (JNICALL *CallStaticShortMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jshort (JNICALL *CallStaticShortMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jint (JNICALL *CallStaticIntMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jint (JNICALL *CallStaticIntMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jint (JNICALL *CallStaticIntMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jlong (JNICALL *CallStaticLongMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jlong (JNICALL *CallStaticLongMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jlong (JNICALL *CallStaticLongMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jfloat (JNICALL *CallStaticFloatMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jfloat (JNICALL *CallStaticFloatMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jfloat (JNICALL *CallStaticFloatMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
jdouble (JNICALL *CallStaticDoubleMethod)
(JNIEnv *env, jclass clazz, jmethodID methodID, ...);
jdouble (JNICALL *CallStaticDoubleMethodV)
(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
jdouble (JNICALL *CallStaticDoubleMethodA)
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
void (JNICALL *CallStaticVoidMethod)
(JNIEnv *env, jclass cls, jmethodID methodID, ...);
void (JNICALL *CallStaticVoidMethodV)
(JNIEnv *env, jclass cls, jmethodID methodID, va_list args);
void (JNICALL *CallStaticVoidMethodA)
(JNIEnv *env, jclass cls, jmethodID methodID, const jvalue * args);
jfieldID (JNICALL *GetStaticFieldID)
(JNIEnv *env, jclass clazz, const char *name, const char *sig);
jobject (JNICALL *GetStaticObjectField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jboolean (JNICALL *GetStaticBooleanField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jbyte (JNICALL *GetStaticByteField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jchar (JNICALL *GetStaticCharField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jshort (JNICALL *GetStaticShortField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jint (JNICALL *GetStaticIntField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jlong (JNICALL *GetStaticLongField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jfloat (JNICALL *GetStaticFloatField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
jdouble (JNICALL *GetStaticDoubleField)
(JNIEnv *env, jclass clazz, jfieldID fieldID);
void (JNICALL *SetStaticObjectField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jobject value);
void (JNICALL *SetStaticBooleanField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jboolean value);
void (JNICALL *SetStaticByteField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jbyte value);
void (JNICALL *SetStaticCharField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jchar value);
void (JNICALL *SetStaticShortField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jshort value);
void (JNICALL *SetStaticIntField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jint value);
void (JNICALL *SetStaticLongField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jlong value);
void (JNICALL *SetStaticFloatField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jfloat value);
void (JNICALL *SetStaticDoubleField)
(JNIEnv *env, jclass clazz, jfieldID fieldID, jdouble value);
jstring (JNICALL *NewString)
(JNIEnv *env, const jchar *unicode, jsize len);
jsize (JNICALL *GetStringLength)
(JNIEnv *env, jstring str);
const jchar *(JNICALL *GetStringChars)
(JNIEnv *env, jstring str, jboolean *isCopy);
void (JNICALL *ReleaseStringChars)
(JNIEnv *env, jstring str, const jchar *chars);
jstring (JNICALL *NewStringUTF)
(JNIEnv *env, const char *utf);
jsize (JNICALL *GetStringUTFLength)
(JNIEnv *env, jstring str);
const char* (JNICALL *GetStringUTFChars)
(JNIEnv *env, jstring str, jboolean *isCopy);
void (JNICALL *ReleaseStringUTFChars)
(JNIEnv *env, jstring str, const char* chars);
jsize (JNICALL *GetArrayLength)
(JNIEnv *env, jarray array);
jobjectArray (JNICALL *NewObjectArray)
(JNIEnv *env, jsize len, jclass clazz, jobject init);
jobject (JNICALL *GetObjectArrayElement)
(JNIEnv *env, jobjectArray array, jsize index);
void (JNICALL *SetObjectArrayElement)
(JNIEnv *env, jobjectArray array, jsize index, jobject val);
jbooleanArray (JNICALL *NewBooleanArray)
(JNIEnv *env, jsize len);
jbyteArray (JNICALL *NewByteArray)
(JNIEnv *env, jsize len);
jcharArray (JNICALL *NewCharArray)
(JNIEnv *env, jsize len);
jshortArray (JNICALL *NewShortArray)
(JNIEnv *env, jsize len);
jintArray (JNICALL *NewIntArray)
(JNIEnv *env, jsize len);
jlongArray (JNICALL *NewLongArray)
(JNIEnv *env, jsize len);
jfloatArray (JNICALL *NewFloatArray)
(JNIEnv *env, jsize len);
jdoubleArray (JNICALL *NewDoubleArray)
(JNIEnv *env, jsize len);
jboolean * (JNICALL *GetBooleanArrayElements)
(JNIEnv *env, jbooleanArray array, jboolean *isCopy);
jbyte * (JNICALL *GetByteArrayElements)
(JNIEnv *env, jbyteArray array, jboolean *isCopy);
jchar * (JNICALL *GetCharArrayElements)
(JNIEnv *env, jcharArray array, jboolean *isCopy);
jshort * (JNICALL *GetShortArrayElements)
(JNIEnv *env, jshortArray array, jboolean *isCopy);
jint * (JNICALL *GetIntArrayElements)
(JNIEnv *env, jintArray array, jboolean *isCopy);
jlong * (JNICALL *GetLongArrayElements)
(JNIEnv *env, jlongArray array, jboolean *isCopy);
jfloat * (JNICALL *GetFloatArrayElements)
(JNIEnv *env, jfloatArray array, jboolean *isCopy);
jdouble * (JNICALL *GetDoubleArrayElements)
(JNIEnv *env, jdoubleArray array, jboolean *isCopy);
void (JNICALL *ReleaseBooleanArrayElements)
(JNIEnv *env, jbooleanArray array, jboolean *elems, jint mode);
void (JNICALL *ReleaseByteArrayElements)
(JNIEnv *env, jbyteArray array, jbyte *elems, jint mode);
void (JNICALL *ReleaseCharArrayElements)
(JNIEnv *env, jcharArray array, jchar *elems, jint mode);
void (JNICALL *ReleaseShortArrayElements)
(JNIEnv *env, jshortArray array, jshort *elems, jint mode);
void (JNICALL *ReleaseIntArrayElements)
(JNIEnv *env, jintArray array, jint *elems, jint mode);
void (JNICALL *ReleaseLongArrayElements)
(JNIEnv *env, jlongArray array, jlong *elems, jint mode);
void (JNICALL *ReleaseFloatArrayElements)
(JNIEnv *env, jfloatArray array, jfloat *elems, jint mode);
void (JNICALL *ReleaseDoubleArrayElements)
(JNIEnv *env, jdoubleArray array, jdouble *elems, jint mode);
void (JNICALL *GetBooleanArrayRegion)
(JNIEnv *env, jbooleanArray array, jsize start, jsize l, jboolean *buf);
void (JNICALL *GetByteArrayRegion)
(JNIEnv *env, jbyteArray array, jsize start, jsize len, jbyte *buf);
void (JNICALL *GetCharArrayRegion)
(JNIEnv *env, jcharArray array, jsize start, jsize len, jchar *buf);
void (JNICALL *GetShortArrayRegion)
(JNIEnv *env, jshortArray array, jsize start, jsize len, jshort *buf);
void (JNICALL *GetIntArrayRegion)
(JNIEnv *env, jintArray array, jsize start, jsize len, jint *buf);
void (JNICALL *GetLongArrayRegion)
(JNIEnv *env, jlongArray array, jsize start, jsize len, jlong *buf);
void (JNICALL *GetFloatArrayRegion)
(JNIEnv *env, jfloatArray array, jsize start, jsize len, jfloat *buf);
void (JNICALL *GetDoubleArrayRegion)
(JNIEnv *env, jdoubleArray array, jsize start, jsize len, jdouble *buf);
void (JNICALL *SetBooleanArrayRegion)
(JNIEnv *env, jbooleanArray array, jsize start, jsize l, const jboolean *buf);
void (JNICALL *SetByteArrayRegion)
(JNIEnv *env, jbyteArray array, jsize start, jsize len, const jbyte *buf);
void (JNICALL *SetCharArrayRegion)
(JNIEnv *env, jcharArray array, jsize start, jsize len, const jchar *buf);
void (JNICALL *SetShortArrayRegion)
(JNIEnv *env, jshortArray array, jsize start, jsize len, const jshort *buf);
void (JNICALL *SetIntArrayRegion)
(JNIEnv *env, jintArray array, jsize start, jsize len, const jint *buf);
void (JNICALL *SetLongArrayRegion)
(JNIEnv *env, jlongArray array, jsize start, jsize len, const jlong *buf);
void (JNICALL *SetFloatArrayRegion)
(JNIEnv *env, jfloatArray array, jsize start, jsize len, const jfloat *buf);
void (JNICALL *SetDoubleArrayRegion)
(JNIEnv *env, jdoubleArray array, jsize start, jsize len, const jdouble *buf);
jint (JNICALL *RegisterNatives)
(JNIEnv *env, jclass clazz, const JNINativeMethod *methods,
jint nMethods);
jint (JNICALL *UnregisterNatives)
(JNIEnv *env, jclass clazz);
jint (JNICALL *MonitorEnter)
(JNIEnv *env, jobject obj);
jint (JNICALL *MonitorExit)
(JNIEnv *env, jobject obj);
jint (JNICALL *GetJavaVM)
(JNIEnv *env, JavaVM **vm);
void (JNICALL *GetStringRegion)
(JNIEnv *env, jstring str, jsize start, jsize len, jchar *buf);
void (JNICALL *GetStringUTFRegion)
(JNIEnv *env, jstring str, jsize start, jsize len, char *buf);
void * (JNICALL *GetPrimitiveArrayCritical)
(JNIEnv *env, jarray array, jboolean *isCopy);
void (JNICALL *ReleasePrimitiveArrayCritical)
(JNIEnv *env, jarray array, void *carray, jint mode);
const jchar * (JNICALL *GetStringCritical)
(JNIEnv *env, jstring string, jboolean *isCopy);
void (JNICALL *ReleaseStringCritical)
(JNIEnv *env, jstring string, const jchar *cstring);
jweak (JNICALL *NewWeakGlobalRef)
(JNIEnv *env, jobject obj);
void (JNICALL *DeleteWeakGlobalRef)
(JNIEnv *env, jweak ref);
jboolean (JNICALL *ExceptionCheck)
(JNIEnv *env);
jobject (JNICALL *NewDirectByteBuffer)
(JNIEnv* env, void* address, jlong capacity);
void* (JNICALL *GetDirectBufferAddress)
(JNIEnv* env, jobject buf);
jlong (JNICALL *GetDirectBufferCapacity)
(JNIEnv* env, jobject buf);
jobjectRefType (JNICALL *GetObjectRefType)
(JNIEnv* env, jobject obj);
"""
def _translate_type(tname):
pointer = 0
while tname.startswith('*'):
tname = tname[len('*'):]
pointer += 1
if tname == 'void':
if pointer > 0:
pointer -= 1
tname = 'void*'
try:
pytname = {
'int': 'ctypes.c_int',
'JNIEnv': '_JNIEnv',
'char': 'ctypes.c_char',
'jboolean': '_jboolean',
'jbyte': '_jbyte',
'jchar': '_jchar',
'jshort': '_jshort',
'jint': '_jint',
'jlong': '_jlong',
'jfloat': '_jfloat',
'jdouble': '_jdouble',
'jsize': '_jsize',
'jobject': '_jobject',
'jclass': '_jclass',
'jarray': '_jarray',
'jobjectArray': '_jobjectArray',
'jbooleanArray': '_jbooleanArray',
'jbyteArray': '_jbyteArray',
'jcharArray': '_jcharArray',
'jshortArray': '_jintArray',
'jintArray': '_jintArray',
'jlongArray': '_jlongArray',
'jfloatArray': '_jfloatArray',
'jdoubleArray': '_jdoubleArray',
'jobjectArray': '_jobjectArray',
'JNINativeMethod': '_JNINativeMethod',
'JavaVM': '_JavaVM',
'jmethodID': '_jmethodID',
'jfieldID': '_jfieldID',
'jthrowable': '_jthrowable',
'jstring': '_jstring',
'jvalue': '_jvalue',
'void*': 'ctypes.c_void_p',
'jweak': '_jweak',
}[tname]
except KeyError:
raise NotImplementedError('Cannot translate type ' + tname)
s = ''.join(itertools.repeat('ctypes.POINTER(', pointer))
s += pytname
s += ''.join(itertools.repeat(')', pointer))
return s
def _gen_method_repr(mname, args, prefix=''):
if any(tname in ('...', 'va_list') for tname,name in args):
return prefix + '# ' + mname + ' skipped because of varargs'
res = prefix + '(' + repr(mname) + ',\n'
res += prefix + ' ' * 4 + 'ctypes.POINTER(ctypes.CFUNCTYPE(\n'
res += ''.join(prefix + ' ' * 8 + (_translate_type(typen) + ',').ljust(32) + ' # ' + name + '\n'
for typen, name in args)
res += prefix + ' ' * 4 + '))\n'
res += prefix + '),'
return res
def _extract_arg(arg_str):
""" Extract (type name, argument name) from a string """
s = arg_str.strip()
if s == '...':
return ('...', '')
if s.startswith('const '):
s = s[len('const '):]
p = ''
if s.count(' * ') == 1:
s = s.replace(' * ', ' ')
p = '*'
typen,name = s.split(' ')
p += '*' * name.count('*')
name = name.replace('*', '')
p += '*' * typen.count('*')
typen = typen.replace('*', '')
assert '*' not in typen
assert '*' not in name
return (p + typen, name)
def gen_ctypes(s, prefix):
res = ''
for mdef in s.split(';'):
if not mdef.strip():
continue
m = re.match('''
\s*(?P<return>[a-zA-Z]+)\s*.*
[(]JNICALL \s* [*](?P<name>[a-zA-Z]+)[)]
\s*[(](?P<args>.*)[)]\s*$
''',
mdef,
flags=re.MULTILINE | re.VERBOSE | re.DOTALL)
if not m:
raise ValueError('Invalid input ' + repr(mdef))
args = list(map(_extract_arg, m.group('args').split(',')))
mrepr = _gen_method_repr(m.group('name'), args, prefix)
res += mrepr + '\n'
return res
if __name__ == '__main__':
import sys
try:
pfx = sys.argv[1]
except IndexError:
pfx = ''
print(gen_ctypes(JNINativeInterface, pfx))
| {
"content_hash": "08409a324633ae097f0ce488fcbfe5bd",
"timestamp": "",
"source": "github",
"line_count": 677,
"max_line_length": 100,
"avg_line_length": 40.55391432791728,
"alnum_prop": 0.6563831724640321,
"repo_name": "phihag/jippy",
"id": "0b174425114ad8e65c51badbc3923614c349c95e",
"size": "27478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/genctypes.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93886"
}
],
"symlink_target": ""
} |
import os
import gevent
import logging
import kazoo.client
import kazoo.exceptions
import kazoo.handlers.gevent
import kazoo.recipe.election
from kazoo.client import KazooState
from kazoo.retry import KazooRetry, ForceRetryError
from kazoo.recipe.counter import Counter
from bitarray import bitarray
from cfgm_common.exceptions import ResourceExhaustionError,\
ResourceExistsError, OverQuota
from gevent.lock import BoundedSemaphore
import datetime
import uuid
import sys
LOG_DIR = '/var/log/contrail/'
class IndexAllocator(object):
def __init__(self, zookeeper_client, path, size=0, start_idx=0,
reverse=False, alloc_list=None, max_alloc=0):
self._size = size
self._start_idx = start_idx
if alloc_list is None:
self._alloc_list = [{'start': start_idx, 'end': start_idx+size}]
else:
sorted_alloc_list = sorted(alloc_list, key=lambda k: k['start'])
self._alloc_list = sorted_alloc_list
size = self._get_range_size(self._alloc_list)
if max_alloc == 0:
self._max_alloc = size
else:
self._max_alloc = max_alloc
self._zookeeper_client = zookeeper_client
self._path = path
self._in_use = bitarray('0')
self._reverse = reverse
for idx in self._zookeeper_client.get_children(path):
idx_int = self._get_bit_from_zk_index(int(idx))
if idx_int >= 0:
self._set_in_use(self._in_use, idx_int)
# end for idx
# end __init__
# Given a set of ranges (alloc_list), return
# the cumulative count of the ranges.
def _get_range_size(self, alloc_list):
alloc_count = len(alloc_list)
size = 0
# check for overlap in alloc_list --TODO
for alloc_idx in range(0, alloc_count - 1):
idx_start_addr = alloc_list[alloc_idx]['start']
idx_end_addr = alloc_list[alloc_idx]['end']
next_start_addr = alloc_list[alloc_idx+1]['start']
if next_start_addr <= idx_end_addr:
raise Exception(
'Allocation Lists Overlapping: %s' % (alloc_list))
size += idx_end_addr - idx_start_addr + 1
size += (alloc_list[alloc_count-1]['end'] -
alloc_list[alloc_count-1]['start'] + 1)
return size
def _has_ranges_shrunk(self, old_list, new_list):
if len(old_list) > len(new_list):
return True
for old_pool, new_pool in zip(old_list, new_list):
if (new_pool['start'] > old_pool['start'] or
new_pool['end'] < old_pool['end']):
return True
return False
# Reallocates the indexes to a new set of indexes provided by
# the user.
# Limitation -
# 1. No. of alloc pools needs to be constant
# For example, [10-20] cannot become [10-20],[25-30]
# 2. Every alloc pool can only expand but not shrink
# For ex, [10-20] can become [9-20] or [10-22] or [9-22]
# but not [15-17]
#
def reallocate(self, new_alloc_list):
sorted_alloc_list = sorted(new_alloc_list,
key=lambda k: k['start'])
if self._has_ranges_shrunk(self._alloc_list, sorted_alloc_list):
raise Exception('Indexes allocated cannot be shrunk: %s' %
(self._alloc_list))
size = self._get_range_size(sorted_alloc_list)
self._max_alloc = size
new_in_use = bitarray(0)
for idx, bitval in enumerate(self._in_use):
if not bitval:
continue
zk_idx = self._get_zk_index_from_bit(idx)
idx_int = self._get_bit_from_zk_index(zk_idx, sorted_alloc_list)
if idx_int >= 0:
self._set_in_use(new_in_use, idx_int)
self._in_use = new_in_use
# end for idx
def _get_zk_index_from_bit(self, idx, alloc_list=None):
if not alloc_list:
alloc_list = self._alloc_list
size = idx
if self._reverse:
for alloc in reversed(alloc_list):
size -= alloc['end'] - alloc['start'] + 1
if size < 0:
return alloc['start'] - size - 1
else:
for alloc in alloc_list:
size -= alloc['end'] - alloc['start'] + 1
if size < 0:
return alloc['end']+size + 1
raise ResourceExhaustionError(
'Cannot get zk index from bit %s' % (idx))
# end _get_zk_index
def _get_bit_from_zk_index(self, idx, alloc_list=None):
if not alloc_list:
alloc_list = self._alloc_list
size = 0
if self._reverse:
for alloc in reversed(alloc_list):
if alloc['start'] <= idx <= alloc['end']:
return alloc['end'] - idx + size
size += alloc['end'] - alloc['start'] + 1
pass
else:
for alloc in alloc_list:
if alloc['start'] <= idx <= alloc['end']:
return idx - alloc['start'] + size
size += alloc['end'] - alloc['start'] + 1
return -1
# end _get_bit_from_zk_index
def _set_in_use(self, array, bitnum):
# if the index is higher than _max_alloc, do not use the bitarray, in
# order to reduce the size of the bitarray. Otherwise, set the bit
# corresponding to idx to 1 and extend the _in_use bitarray if needed
if bitnum > self._max_alloc:
return
if bitnum >= array.length():
temp = bitarray(bitnum - array.length())
temp.setall(0)
temp.append('1')
array.extend(temp)
else:
array[bitnum] = 1
# end _set_in_use
def _reset_in_use(self, bitnum):
# if the index is higher than _max_alloc, do not use the bitarray, in
# order to reduce the size of the bitarray. Otherwise, set the bit
# corresponding to idx to 1 and extend the _in_use bitarray if needed
if bitnum > self._max_alloc:
return
if bitnum >= self._in_use.length():
return
else:
self._in_use[bitnum] = 0
# end _reset_in_use
def set_in_use(self, idx):
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx < 0:
return
self._set_in_use(self._in_use, bit_idx)
# end set_in_use
def reset_in_use(self, idx):
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx < 0:
return
self._reset_in_use(bit_idx)
# end reset_in_use
def get_alloc_count(self):
return self._in_use.count()
# end get_alloc_count
def _alloc_from_pools(self, pools=None):
if not pools:
raise ResourceExhaustionError()
if self._reverse:
pools = list(reversed(pools))
for pool in pools:
last_idx = self._in_use.length() - 1
pool_start = pool['start']
pool_end = pool['end']
pool_size = pool_end - pool_start + 1
if self._reverse:
start_zk_idx = pool_end
end_zk_idx = pool_start
else:
start_zk_idx = pool_start
end_zk_idx = pool_end
start_bit_idx = self._get_bit_from_zk_index(start_zk_idx)
end_bit_idx = self._get_bit_from_zk_index(end_zk_idx)
# if bitarray is less then start_bit_index,
# extend bit array to start_bit_idx and use that idx
if last_idx < start_bit_idx:
temp = bitarray(start_bit_idx - last_idx)
temp.setall(0)
self._in_use.extend(temp)
self._in_use[start_bit_idx] = 1
return start_bit_idx
# if bitarray is in between start_bit_idx and end_bit_idx
if last_idx >= start_bit_idx and last_idx <= end_bit_idx:
# we need to slice part of bitarray from
# start of the pool and end of array
pool_bitarray = self._in_use[start_bit_idx:]
else:
pool_bitarray = self._in_use[
start_bit_idx:end_bit_idx+1]
if pool_bitarray.all():
if last_idx >= end_bit_idx:
continue
idx = self._in_use.length()
self._in_use.append(1)
else:
idx = pool_bitarray.index(0)
idx += start_bit_idx
self._in_use[idx] = 1
return idx
raise ResourceExhaustionError()
# end _alloc_from_pools
def alloc(self, value=None, pools=None):
if pools:
idx = self._alloc_from_pools(pools)
else:
# Allocates a index from the allocation list
if self._in_use.all():
idx = self._in_use.length()
if idx > self._max_alloc:
raise ResourceExhaustionError()
self._in_use.append(1)
else:
idx = self._in_use.index(0)
self._in_use[idx] = 1
idx = self._get_zk_index_from_bit(idx)
try:
# Create a node at path and return its integer value
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.create_node(self._path + id_str, value)
return idx
except ResourceExistsError:
return self.alloc(value, pools)
# end alloc
def reserve(self, idx, value=None):
# Reserves the requested index if available
if not self._start_idx <= idx < self._start_idx + self._size:
return None
try:
# Create a node at path and return its integer value
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.create_node(self._path + id_str, value)
self.set_in_use(idx)
return idx
except ResourceExistsError:
self.set_in_use(idx)
existing_value = self.read(idx)
if (value == existing_value):
# idempotent reserve
return idx
msg = 'For index %s reserve conflicts with existing value %s.' \
%(idx, existing_value)
self._zookeeper_client.syslog(msg, level='notice')
raise
# end reserve
def delete(self, idx):
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.delete_node(self._path + id_str)
bit_idx = self._get_bit_from_zk_index(idx)
if 0 <= bit_idx < self._in_use.length():
self._in_use[bit_idx] = 0
# end delete
def read(self, idx):
id_str = "%(#)010d" % {'#': idx}
id_val = self._zookeeper_client.read_node(self._path+id_str)
if id_val is not None:
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx >= 0:
self._set_in_use(self._in_use, bit_idx)
return id_val
# end read
def empty(self):
return not self._in_use.any()
# end empty
@classmethod
def delete_all(cls, zookeeper_client, path):
try:
zookeeper_client.delete_node(path, recursive=True)
except kazoo.exceptions.NotEmptyError:
#TODO: Add retries for NotEmptyError
zookeeper_client.syslog("NotEmptyError while deleting %s" % path)
# end delete_all
#end class IndexAllocator
class ZookeeperCounter(Counter):
def __init__(self, client, path, max_count=sys.maxint, default=0):
super(ZookeeperCounter, self).__init__(client, path, default)
self.max_count = max_count
def _inner_change(self, value):
data, version = self._value()
data = repr(data + value).encode('ascii')
if int(data) > self.max_count:
raise OverQuota()
try:
self.client.set(self.path, data, version=version)
except kazoo.exceptions.BadVersionError: # pragma: nocover
raise ForceRetryError()
# end class ZookeeperCounter
class ZookeeperClient(object):
def __init__(self, module, server_list, logging_fn=None, zk_timeout=400,
log_response_time=None):
# logging
logger = logging.getLogger(module)
logger.setLevel(logging.DEBUG)
try:
handler = logging.handlers.RotatingFileHandler(
LOG_DIR + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
except IOError:
print "Cannot open log file in %s" %(LOG_DIR)
else:
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
if logging_fn:
self.log = logging_fn
else:
self.log = self.syslog
self.log_response_time = log_response_time
# KazooRetry to retry keeper CRUD operations
self._retry = KazooRetry(max_tries=None, max_delay=300,
sleep_func=gevent.sleep)
self._zk_client = kazoo.client.KazooClient(
server_list,
timeout=zk_timeout,
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger,
connection_retry=self._retry,
command_retry=self._retry)
self._zk_client.add_listener(self._zk_listener)
self._logger = logger
self._election = None
self._server_list = server_list
self._conn_state = None
self._sandesh_connection_info_update(status='INIT', message='')
self._lost_cb = None
self._suspend_cb = None
self.delete_node = self._response_time(self.delete_node, "DELETE")
self.create_node = self._response_time(self.create_node, "CREATE")
self.read_node = self._response_time(self.read_node, "READ")
self.get_children= self._response_time(self.get_children, "GET_CHILDREN")
self.exists = self._response_time(self.exists, "EXISTS")
self.connect()
# end __init__
def _response_time(self, func, oper):
def wrapper(*args, **kwargs):
# Measure the time
self.start_time = datetime.datetime.now()
val = func(*args, **kwargs)
self.end_time = datetime.datetime.now()
if self.log_response_time:
self.log_response_time(self.end_time - self.start_time, oper)
return val
# Measure the time again
return wrapper
# start
def connect(self):
while True:
try:
self._zk_client.start()
break
except gevent.event.Timeout as e:
self._zk_client.close()
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
gevent.sleep(1)
# Zookeeper is also throwing exception due to delay in master election
except Exception as e:
self._zk_client.stop()
self._zk_client.close()
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
gevent.sleep(1)
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
# end
def is_connected(self):
return self._zk_client.state == KazooState.CONNECTED
# end is_connected
def syslog(self, msg, *args, **kwargs):
if not self._logger:
return
level = kwargs.get('level', 'info')
if isinstance(level, int):
from pysandesh.sandesh_logger import SandeshLogger
level = SandeshLogger.get_py_logger_level(level)
self._logger.log(level, msg)
return
log_method = getattr(self._logger, level, self._logger.info)
log_method(msg)
# end syslog
def set_lost_cb(self, lost_cb=None):
# set a callback to be called when kazoo state is lost
# set to None for default action
self._lost_cb = lost_cb
# end set_lost_cb
def set_suspend_cb(self, suspend_cb=None):
# set a callback to be called when kazoo state is suspend
# set to None for default action
self._suspend_cb = suspend_cb
# end set_suspend_cb
def _zk_listener(self, state):
if state == KazooState.CONNECTED:
if self._election:
self._election.cancel()
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
elif state == KazooState.LOST:
# Lost the session with ZooKeeper Server
# Best of option we have is to exit the process and restart all
# over again
self._sandesh_connection_info_update(status='DOWN',
message='Connection to Zookeeper lost')
if self._lost_cb:
self._lost_cb()
else:
os._exit(2)
elif state == KazooState.SUSPENDED:
# Update connection info
self._sandesh_connection_info_update(status='INIT',
message = 'Connection to zookeeper lost. Retrying')
if self._suspend_cb:
self._suspend_cb()
# end
def master_election(self, path, identifier, func, *args, **kwargs):
self._election = self._zk_client.Election(path, identifier)
self._election.run(func, *args, **kwargs)
# end master_election
def quota_counter(self, path, max_count=sys.maxint, default=0):
return ZookeeperCounter(self._zk_client, path, max_count, default=default)
def create_node(self, path, value=None):
try:
if value is None:
value = uuid.uuid4()
retry = self._retry.copy()
retry(self._zk_client.create, path, str(value), makepath=True)
except kazoo.exceptions.NodeExistsError:
current_value = self.read_node(path)
if current_value == value:
return True;
raise ResourceExistsError(path, str(current_value), 'zookeeper')
# end create_node
def delete_node(self, path, recursive=False):
try:
retry = self._retry.copy()
retry(self._zk_client.delete, path, recursive=recursive)
except kazoo.exceptions.NoNodeError:
pass
# end delete_node
def read_node(self, path, include_timestamp=False):
try:
retry = self._retry.copy()
value = retry(self._zk_client.get, path)
if include_timestamp:
return value
return value[0]
except Exception:
return None
# end read_node
def get_children(self, path):
try:
retry = self._retry.copy()
return retry(self._zk_client.get_children, path)
except Exception:
return []
# end read_node
def exists(self, path):
try:
retry = self._retry.copy()
return retry(self._zk_client.exists, path)
except Exception:
return []
# end exists
def _sandesh_connection_info_update(self, status, message):
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
new_conn_state = getattr(ConnectionStatus, status)
ConnectionState.update(conn_type = ConnType.ZOOKEEPER,
name = 'Zookeeper', status = new_conn_state,
message = message,
server_addrs = self._server_list.split(','))
if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
new_conn_state == ConnectionStatus.DOWN):
msg = 'Connection to Zookeeper down: %s' %(message)
self.log(msg, level=SandeshLevel.SYS_ERR)
if (self._conn_state and self._conn_state != new_conn_state and
new_conn_state == ConnectionStatus.UP):
msg = 'Connection to Zookeeper ESTABLISHED'
self.log(msg, level=SandeshLevel.SYS_NOTICE)
self._conn_state = new_conn_state
# end _sandesh_connection_info_update
# end class ZookeeperClient
| {
"content_hash": "ad08b441da1f94476d94298b9471c33c",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 83,
"avg_line_length": 36.11826086956522,
"alnum_prop": 0.5514734206471494,
"repo_name": "nischalsheth/contrail-controller",
"id": "f61829f829a9570d8a5d06c73bf918b795ce37b2",
"size": "20837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/common/zkclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88437"
},
{
"name": "C++",
"bytes": "23392370"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "Go",
"bytes": "45352"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "20359"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Python",
"bytes": "7781013"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "63970"
},
{
"name": "Thrift",
"bytes": "5666"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
} |
import json
import os
import unittest
from nose.plugins.attrib import attr
import cloudferry_devlab.tests.config as config
from cloudferry_devlab.tests import functional_test
class VmMigration(functional_test.FunctionalTest):
def setUp(self):
super(VmMigration, self).setUp()
src_vms = self.filter_vms()
if not src_vms:
self.skipTest("Nothing to migrate - source vm list is empty")
self.dst_vms = self.dst_cloud.novaclient.servers.list(
search_opts={'all_tenants': 1})
if not self.dst_vms:
self.fail("No VM's on destination. Either Migration was not "
"successful for resource 'VM' or it was not initiated")
src_vms = [vm for vm in src_vms if vm.status != 'ERROR' and
self.tenant_exists(self.src_cloud.keystoneclient,
vm.tenant_id)]
self.dst_vm_indexes = []
for vm in src_vms:
if vm.name not in config.vms_not_in_filter:
self.dst_vm_indexes.append(
[x.name for x in self.dst_vms].index(vm.name))
file_path = os.path.join(self.cloudferry_dir,
'pre_migration_vm_states.json')
with open(file_path) as data_file:
self.before_migr_states = json.load(data_file)
self.filter_vms = self.filtering_utils.filter_vms(src_vms)
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_vms_not_in_filter_stay_active_on_src(self):
"""Validate VMs which not icluded in filter stays active on SRC cloud.
"""
original_states = self.before_migr_states
for vm in config.vms_not_in_filter:
vm_list = [x for x in self.src_cloud.novaclient.servers.list(
search_opts={'all_tenants': 1}) if x.name == vm]
for filtered_vm in vm_list:
self.assertTrue(
filtered_vm.status == original_states[filtered_vm.name],
msg="Vm %s has wrong state" % filtered_vm.name)
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_vm_not_in_filter_did_not_migrate(self):
"""Validate VMs not included in filter file weren't migrated."""
dst_vms = [x.name for x in self.dst_cloud.novaclient.servers.list(
search_opts={'all_tenants': 1})]
for vm in config.vms_not_in_filter:
self.assertTrue(vm not in dst_vms,
'VM migrated despite that it was not included in '
'filter, VM info: \n{}'.format(vm))
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_cold_migrate_vm_state(self):
"""Validate VMs were cold migrated with correct states."""
original_states = self.before_migr_states
for vm_name in original_states.keys():
if vm_name in config.vms_not_in_filter:
original_states.pop(vm_name)
src_vms = self.filter_vms[0]
for src_vm, vm_index in zip(src_vms, self.dst_vm_indexes):
if src_vm.name in original_states.keys():
if original_states[src_vm.name] == 'ACTIVE' or \
original_states[src_vm.name] == 'VERIFY_RESIZE':
self.assertTrue(
src_vm.status == 'SHUTOFF' and
self.dst_vms[vm_index].status == 'ACTIVE')
else:
self.assertTrue(
src_vm.status == 'SHUTOFF' and
self.dst_vms[vm_index].status == 'SHUTOFF')
else:
self.assertTrue(src_vm.status == 'SHUTOFF' and
self.dst_vms[vm_index].status == 'ACTIVE')
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_cold_migrate_vm_ip(self):
"""Validate VMs were cold migrated with correct IPs."""
src_vms = self.filter_vms[0]
for src_vm, vm_index in zip(src_vms, self.dst_vm_indexes):
for src_net in src_vm.addresses:
for src_net_addr, dst_net_addr in zip(src_vm.addresses
[src_net],
self.dst_vms[vm_index]
.addresses[src_net]):
self.assertTrue(src_net_addr['addr'] ==
dst_net_addr['addr'])
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_cold_migrate_vm_security_groups(self):
"""Validate VMs were cold migrated with correct security groups."""
src_vms = self.filter_vms[0]
for src_vm, vm_index in zip(src_vms, self.dst_vm_indexes):
dst_sec_group_names = [x['name'] for x in
self.dst_vms[vm_index].security_groups]
for security_group in src_vm.security_groups:
self.assertTrue(security_group['name'] in dst_sec_group_names)
@unittest.skip("Temporarily disabled: image's id changes after migrating")
def test_cold_migrate_vm_image_id(self):
"""Validate VMs were cold migrated with correct image ids."""
src_vms = self.filter_vms[0]
for src_vm, vm_index in zip(src_vms, self.dst_vm_indexes):
self.assertTrue(src_vm.image.id ==
self.dst_vms[vm_index].image.id)
| {
"content_hash": "79dc856394fedc48ce96067e754c5b3c",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 48.80357142857143,
"alnum_prop": 0.5525064032199049,
"repo_name": "mgrygoriev/CloudFerry",
"id": "64378ffd16602aee3cb2a86a5b15257a1ce4d071",
"size": "6041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudferry_devlab/cloudferry_devlab/tests/testcases/test_vm_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "775433"
},
{
"name": "Ruby",
"bytes": "5181"
},
{
"name": "Shell",
"bytes": "34787"
}
],
"symlink_target": ""
} |
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer)
from azure.cli.testsdk.scenario_tests.decorators import AllowLargeResponse
class NWFlowLogScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='centraluseuap')
@StorageAccountPreparer(name_prefix='testflowlog', location='centraluseuap', kind='StorageV2')
def test_nw_flow_log_create_vnetfl(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'vnet': 'vnet1',
'subnet': 'subnet1',
'nic': 'nic1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
self.cmd('network vnet create -g {rg} -n {vnet}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet} --address-prefix 10.0.0.0/24')
self.cmd('network nic create -g {rg} -n {nic} --vnet-name {vnet} --subnet {subnet}')
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location eastus '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
#targetId as vnet
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--vnet {vnet} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log list --location {location}')
# This output is Azure Management Resource formatted.
self.cmd('network watcher flow-log show --location {location} --name {flow_log}', checks=[
self.check('name', self.kwargs['flow_log']),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check_pattern('targetResourceId', '.*/{vnet}$'),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False),
])
#targetId as subnet
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--vnet {vnet} '
'--subnet {subnet} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log list --location {location}')
# This output is Azure Management Resource formatted.
self.cmd('network watcher flow-log show --location {location} --name {flow_log}', checks=[
self.check('name', self.kwargs['flow_log']),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check_pattern('targetResourceId', '.*/{subnet}$'),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False),
])
#targetId as nic
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nic {nic} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log list --location {location}')
# This output is Azure Management Resource formatted.
self.cmd('network watcher flow-log show --location {location} --name {flow_log}', checks=[
self.check('name', self.kwargs['flow_log']),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check_pattern('targetResourceId', '.*/{nic}$'),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False),
])
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='eastus')
@StorageAccountPreparer(name_prefix='testflowlog', location='eastus', kind='StorageV2')
def test_nw_flow_log_create(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'nsg': 'nsg1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
self.cmd('network nsg create -g {rg} -n {nsg}')
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location {location} '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nsg {nsg} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log list --location {location}')
# This output is Azure Management Resource formatted.
self.cmd('network watcher flow-log show --location {location} --name {flow_log}', checks=[
self.check('name', self.kwargs['flow_log']),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False),
])
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='centraluseuap')
@StorageAccountPreparer(name_prefix='testflowlog', location='centraluseuap', kind='StorageV2')
@AllowLargeResponse(1024)
def test_nw_flow_log_delete_vnetfl(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'vnet': 'vnet1',
'subnet': 'subnet1',
'nic': 'nic1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test2',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
self.cmd('network vnet create -g {rg} -n {vnet}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet} --address-prefix 10.0.0.0/24')
self.cmd('network nic create -g {rg} -n {nic} --vnet-name {vnet} --subnet {subnet}')
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location westus '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
#targetId as vnet
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--vnet {vnet} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
self.cmd('network watcher flow-log delete --location {location} --name {flow_log}')
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
#targetId as subnet
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--vnet {vnet} '
'--subnet {subnet} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
self.cmd('network watcher flow-log delete --location {location} --name {flow_log}')
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
#targetId as nic
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nic {nic} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
self.cmd('network watcher flow-log delete --location {location} --name {flow_log}')
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='eastus')
@StorageAccountPreparer(name_prefix='testflowlog', location='eastus', kind='StorageV2')
@AllowLargeResponse(1024)
def test_nw_flow_log_delete(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'nsg': 'nsg1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test2',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
self.cmd('network nsg create -g {rg} -n {nsg}')
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location {location} '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nsg {nsg} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
self.cmd('network watcher flow-log delete --location {location} --name {flow_log}')
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='westus')
@StorageAccountPreparer(name_prefix='testflowlog', location='westus', kind='StorageV2')
@AllowLargeResponse(1024)
def test_nw_flow_log_show(self, resource_group, resource_group_location, storage_account):
"""
This test is used to demonstrate different outputs between the new and deprecating parameters
:param resource_group:
:param resource_group_location:
:param storage_account:
:return:
"""
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'nsg': 'nsg1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test2',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
nsg_info = self.cmd('network nsg create -g {rg} -n {nsg}').get_output_in_json()
self.kwargs.update({
'nsg_id': nsg_info['NewNSG']['id']
})
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location {location} '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nsg {nsg} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
# This output is new Azure Management Resource formatted.
self.cmd('network watcher flow-log show --location {location} --name {flow_log}', checks=[
self.check('name', self.kwargs['flow_log']),
self.check('enabled', True),
self.check('format.type', 'JSON'),
self.check('format.version', 1),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False),
])
# This output is deprecating
self.cmd('network watcher flow-log show --nsg {nsg_id}', checks=[
self.check('enabled', True),
self.check('format.type', 'JSON'),
self.check('format.version', 1),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False),
self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId',
self.kwargs['workspace_id']),
self.check('retentionPolicy.days', 0),
self.check('retentionPolicy.enabled', False)
])
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='centraluseuap')
@StorageAccountPreparer(name_prefix='testflowlog', location='centraluseuap', kind='StorageV2')
def test_nw_flow_log_update_vnetfl(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'nsg': 'nsg1',
'vnet': 'vnet1',
'subnet': 'subnet1',
'nic': 'nic1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test2',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
self.cmd('network nsg create -g {rg} -n {nsg}')
self.cmd('network vnet create -g {rg} -n {vnet}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet} --address-prefix 10.0.0.0/24')
self.cmd('network nic create -g {rg} -n {nic} --vnet-name {vnet} --subnet {subnet}')
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location eastus '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--vnet {vnet} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
res1 = self.cmd('network watcher flow-log show --location {location} --name {flow_log}').get_output_in_json()
self.assertEqual(res1['name'], self.kwargs['flow_log'])
self.assertEqual(res1['enabled'], True)
self.assertEqual(res1['retentionPolicy']['days'], 0)
self.assertEqual(res1['retentionPolicy']['enabled'], False)
self.assertIsNone(res1['tags'])
#update targetId from vnet to nic
res2 = self.cmd('network watcher flow-log update '
'--location {location} '
'--name {flow_log} '
'--nic {nic} '
'--resource-group {rg} '
'--retention 2 '
'--tags foo=bar ').get_output_in_json()
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['enabled'], True)
self.assertTrue(res2['targetResourceId'].endswith(self.kwargs['nic']))
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['retentionPolicy']['days'], 2)
self.assertEqual(res2['retentionPolicy']['enabled'], True)
self.assertIsNotNone(res2['tags'])
self.cmd('network watcher flow-log delete --location {location} --name {flow_log}')
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
#targetId as subnet
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--vnet {vnet} '
'--subnet {subnet} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
res1 = self.cmd('network watcher flow-log show --location {location} --name {flow_log}').get_output_in_json()
self.assertEqual(res1['name'], self.kwargs['flow_log'])
self.assertEqual(res1['enabled'], True)
self.assertEqual(res1['retentionPolicy']['days'], 0)
self.assertEqual(res1['retentionPolicy']['enabled'], False)
self.assertIsNone(res1['tags'])
#update targetId from subnet to nsg
res2 = self.cmd('network watcher flow-log update '
'--location {location} '
'--name {flow_log} '
'--nsg {nsg} '
'--resource-group {rg} '
'--retention 2 '
'--tags foo=bar ').get_output_in_json()
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['enabled'], True)
self.assertTrue(res2['targetResourceId'].endswith(self.kwargs['nsg']))
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['retentionPolicy']['days'], 2)
self.assertEqual(res2['retentionPolicy']['enabled'], True)
self.assertIsNotNone(res2['tags'])
self.cmd('network watcher flow-log delete --location {location} --name {flow_log}')
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('network watcher flow-log show --location {location} --name {flow_log}')
#targetId as NSG
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nsg {nsg} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
res1 = self.cmd('network watcher flow-log show --location {location} --name {flow_log}').get_output_in_json()
self.assertEqual(res1['name'], self.kwargs['flow_log'])
self.assertEqual(res1['enabled'], True)
self.assertEqual(res1['retentionPolicy']['days'], 0)
self.assertEqual(res1['retentionPolicy']['enabled'], False)
self.assertIsNone(res1['tags'])
#update targetId from nsg to vnet
res2 = self.cmd('network watcher flow-log update '
'--location {location} '
'--name {flow_log} '
'--vnet {vnet} '
'--resource-group {rg} '
'--retention 2 '
'--tags foo=bar ').get_output_in_json()
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['enabled'], True)
self.assertTrue(res2['targetResourceId'].endswith(self.kwargs['vnet']))
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['retentionPolicy']['days'], 2)
self.assertEqual(res2['retentionPolicy']['enabled'], True)
self.assertIsNotNone(res2['tags'])
@ResourceGroupPreparer(name_prefix='test_nw_flow_log_', location='eastus')
@StorageAccountPreparer(name_prefix='testflowlog', location='eastus', kind='StorageV2')
def test_nw_flow_log_update(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'storage_account': storage_account,
'storage_account_2': 'storageaccount0395',
'nsg': 'nsg1',
'watcher_rg': 'NetworkWatcherRG',
'watcher_name': 'NetworkWatcher_{}'.format(resource_group_location),
'flow_log': 'flow_log_test2',
'workspace': self.create_random_name('clitest', 20),
})
# enable network watcher
# self.cmd('network watcher configure -g {rg} --locations {location} --enabled')
# prepare the target resource
nsg_info = self.cmd('network nsg create -g {rg} -n {nsg}').get_output_in_json()
self.kwargs.update({
'nsg_id': nsg_info['NewNSG']['id']
})
# prepare another storage account in another resource group
storage_info = self.cmd('storage account create '
'--resource-group {rg} '
'--name {storage_account_2} --https-only').get_output_in_json()
self.kwargs.update({
'another_storage': storage_info['id']
})
# prepare workspace
workspace = self.cmd('monitor log-analytics workspace create '
'--resource-group {rg} '
'--location {location} '
'--workspace-name {workspace} ').get_output_in_json()
self.kwargs.update({
'workspace_id': workspace['id']
})
self.cmd('network watcher flow-log create '
'--location {location} '
'--resource-group {rg} '
'--nsg {nsg} '
'--storage-account {storage_account} '
'--workspace {workspace_id} '
'--name {flow_log} ')
res1 = self.cmd('network watcher flow-log show --location {location} --name {flow_log}').get_output_in_json()
self.assertEqual(res1['name'], self.kwargs['flow_log'])
self.assertEqual(res1['enabled'], True)
self.assertEqual(res1['retentionPolicy']['days'], 0)
self.assertEqual(res1['retentionPolicy']['enabled'], False)
self.assertTrue(res1['storageId'].endswith(self.kwargs['storage_account']))
self.assertIsNone(res1['tags'])
res2 = self.cmd('network watcher flow-log update '
'--location {location} '
'--name {flow_log} '
'--retention 2 '
'--storage-account {another_storage} '
'--tags foo=bar ').get_output_in_json()
self.assertEqual(res2['name'], self.kwargs['flow_log'])
self.assertEqual(res2['enabled'], True)
self.assertEqual(res2['retentionPolicy']['days'], 2)
self.assertEqual(res2['retentionPolicy']['enabled'], True)
self.assertTrue(res2['storageId'].endswith(self.kwargs['storage_account_2']))
self.assertIsNotNone(res2['tags'])
| {
"content_hash": "f1cdb3d5a63b7bbe79174b3921c054f7",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 117,
"avg_line_length": 46.571681415929206,
"alnum_prop": 0.5590772621897921,
"repo_name": "yugangw-msft/azure-cli",
"id": "2b7fbf09c377350770847e85e9cd15174c7bc638",
"size": "26659",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/network/tests/latest/test_nw_flow_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
from debian_tools.command_process import CommandProcess
def main():
process = CommandProcess([
'flake8',
'--exclude', '.venv,.git,.idea,.tox',
'--verbose',
'--max-complexity', '5'
])
process.print_output()
if __name__ == '__main__':
main()
| {
"content_hash": "5d76bb6a9f935225e5ce670752f265b0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 19.466666666666665,
"alnum_prop": 0.5376712328767124,
"repo_name": "FunTimeCoding/debian-tools",
"id": "34358e0f37b898f95e25d6781c9ef7d67d7c5afd",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mccabe.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "89"
},
{
"name": "Python",
"bytes": "13234"
},
{
"name": "Shell",
"bytes": "19418"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Safe User import for Django < 1.5
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PhoneDevice'
db.create_table('two_factor_phonedevice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('confirmed', self.gf('django.db.models.fields.BooleanField')(default=True)),
('number', self.gf('django.db.models.fields.CharField')(max_length=16)),
('key', self.gf('django.db.models.fields.CharField')(default='c4f6544f9fe5a7045498c99456e4abee1a3b3dca', max_length=40)),
('method', self.gf('django.db.models.fields.CharField')(max_length=4)),
))
db.send_create_signal('two_factor', ['PhoneDevice'])
def backwards(self, orm):
# Deleting model 'PhoneDevice'
db.delete_table('two_factor_phonedevice')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
User._meta.pk.attname: (
'django.db.models.fields.AutoField', [],
{'primary_key': 'True',
'db_column': "'%s'" % User._meta.pk.column}
),
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'two_factor.phonedevice': {
'Meta': {'object_name': 'PhoneDevice'},
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "'5c351effc09931c7de3b8fb6f93f55834a598580'", 'max_length': '40'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
'two_factor.token': {
'Meta': {'object_name': 'Token'},
'backup_phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True'})
},
'two_factor.verifiedcomputer': {
'Meta': {'object_name': 'VerifiedComputer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label}),
'verified_until': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['two_factor']
| {
"content_hash": "f0c0a0ed9d9c66e6f90b4a3da79fcbbb",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 182,
"avg_line_length": 56.845360824742265,
"alnum_prop": 0.5614798694232862,
"repo_name": "moreati/django-two-factor-auth",
"id": "f102a6f740c4d40bbdf22886f5aaf02ba3a39b31",
"size": "5538",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "two_factor/south_migrations/0003_add_model_phone_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20642"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "158589"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2013 Niko Skrypnik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Geometry class
=============
"""
class Geometry(object):
def __init__(self, name=''):
self.name = name
self.faces = []
self.vertices = []
self.face_vertex_uvs = [[]]
def compute_vertex_normal(self):
pass
| {
"content_hash": "9b33187c65e6992d1988b8d63510559f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 32.926829268292686,
"alnum_prop": 0.737037037037037,
"repo_name": "KeyWeeUsr/kivy3",
"id": "a7780181f46a1d6c6387cebd9a4b07ef6ae07626",
"size": "1350",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kivy3/core/geometry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "2477"
},
{
"name": "Python",
"bytes": "59562"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
class Breadcrumb(object):
def __init__(self, name, url=None):
self.name = name
self.url = url
def get_absolute_url(self):
if not isinstance(self.url, basestring):
if len(self.url) > 1: #hack
return reverse(self.url[0], args=self.url[1], kwargs=self.url[2])
return reverse(*self.url)
return self.url
| {
"content_hash": "3d32d099667610fbfa5c92946c24da07",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 30.785714285714285,
"alnum_prop": 0.5893271461716937,
"repo_name": "zbyte64/django-dockit",
"id": "7b333995fb0213c7eed53d89c40d5d69232c9a7a",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dockit/admin/breadcrumbs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2494"
},
{
"name": "Python",
"bytes": "428384"
}
],
"symlink_target": ""
} |
from string import punctuation
import operator
import pandas as pd
def WordCount(dataframe, country, column_str):
'''
This function do word count for a dataframe. It first group the dataframe
based on the column with the country name, and then cat all the str stored
in that country together, and perform a wordcount on the concatenated str.
It returns a dictionary of all the word for each country.
params: dataframe: the dataframe on which you want to perform wordcount
params: country: the column on which the dataframe is grouped by
params: column_name_str: the column in which the str is stored
'''
dic = {}
UniqueNames = dataframe.country.unique()
dic = {item: pd.DataFrame for item in UniqueNames}
for key in dic.keys():
dic[key] = dataframe[dataframe.country == key]
dic2 = {}
for p in dic.keys():
dic2[p] = reduce(lambda x, y: x + y,
dic[p][column_str], '')
wc = {}
for k, v in dic2.iteritems():
ls = dic2.get(k).lower().translate(None, punctuation).split(' ')
freq = {}
for word in ls:
freq[word] = ls.count(word)
sorted_freq = sorted(freq.items(), key=operator.itemgetter(1),
reverse=True)
wc[k] = sorted_freq
return wc
| {
"content_hash": "0a85dc18c97483c76200ff75b5145e13",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.6216417910447761,
"repo_name": "ManushiM/infoviz_refugee_project",
"id": "49610a991e0c0b44dd0b1ba7af1dab68cf0dd8a3",
"size": "1362",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dataProcessing/wordcount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "68748"
},
{
"name": "JavaScript",
"bytes": "11954"
},
{
"name": "Jupyter Notebook",
"bytes": "7225"
},
{
"name": "Python",
"bytes": "8960"
}
],
"symlink_target": ""
} |
from qdb_cache.cache import QdbCache
| {
"content_hash": "f0b683cc9977b9bfb912ff3033dcfa41",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 37,
"alnum_prop": 0.8378378378378378,
"repo_name": "holys/django-qdb-cache",
"id": "a10aea25c7f6ce29476f56448d530375943e5281",
"size": "37",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qdb_cache/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11617"
}
],
"symlink_target": ""
} |
"""Offer sun based automation rules."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_EVENT,
CONF_OFFSET,
CONF_PLATFORM,
SUN_EVENT_SUNRISE,
)
from homeassistant.helpers.event import async_track_sunrise, async_track_sunset
import homeassistant.helpers.config_validation as cv
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "sun",
vol.Required(CONF_EVENT): cv.sun_event,
vol.Required(CONF_OFFSET, default=timedelta(0)): cv.time_period,
}
)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for events based on configuration."""
event = config.get(CONF_EVENT)
offset = config.get(CONF_OFFSET)
@callback
def call_action():
"""Call action with right context."""
hass.async_run_job(
action, {"trigger": {"platform": "sun", "event": event, "offset": offset}}
)
if event == SUN_EVENT_SUNRISE:
return async_track_sunrise(hass, call_action, offset)
return async_track_sunset(hass, call_action, offset)
| {
"content_hash": "53655c0188695ed8dfde7af3c882d518",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 86,
"avg_line_length": 27.955555555555556,
"alnum_prop": 0.6820349761526232,
"repo_name": "joopert/home-assistant",
"id": "66892784a54d1d02925d52e6120fd01953ea0308",
"size": "1258",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/automation/sun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS.append('testserver')
ALLOWED_HOSTS.append('127.0.0.1')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': get_env_variable('DATABASE_NAME'),
'USER': get_env_variable('DATABASE_USER'),
'PASSWORD': get_env_variable('DATABASE_PASSWORD'),
'HOST': '',
'PORT': '',
}
}
| {
"content_hash": "50eb1eb78ee28f1c999cf686a43d27df",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 24.36842105263158,
"alnum_prop": 0.6047516198704104,
"repo_name": "swpease/Flavify",
"id": "740399aa5d28d73b23badd62c392ff873b4287d9",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flavify/settings/testing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2013"
},
{
"name": "HTML",
"bytes": "40382"
},
{
"name": "JavaScript",
"bytes": "5361"
},
{
"name": "Python",
"bytes": "47265"
}
],
"symlink_target": ""
} |
import os,json,serial,socket
import subprocess,sys,time
spectro_config = os.environ['SPECTROCONFIG']
configFilePath = os.path.join(spectro_config,'hardware.json')
json_text=open(configFilePath).read()
hardware_config = json.loads(json_text)
class Serial:
def __init__(self,device,timeout=1):
print(f"Serial({device}) :",end="")
serial_device_config = hardware_config[device]['serial_port']
self.baud_rate = serial_device_config['baud_rate']
self.device_type = serial_device_config['type']
if self.device_type == "moxa_tcp":
# init with direct tcpip socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = 4000+ serial_device_config['port_id']
self.address = serial_device_config['address']
print(f"Connect to moxa_tcp socket address = {self.address}:{self.port}")
self.socket.connect( (self.address, self.port))
self.makefile_ReadLine = self.socket.makefile("rb")
else:
com_device = os.path.expanduser(serial_device_config['tty_path'])
if self.device_type == "moxa_socat":
self.ser = open_socat(device)
else:
self.ser = serial.Serial(com_device,self.baud_rate,timeout = timeout)
def write(self,data):
print(f"ser.write({data})",flush=True)
if self.device_type == "moxa_tcp":
self.socket.send(data)
elif self.device_type == "moxa_socat" or self.device_type == "local":
self.ser.write(data)
def readline(self):
print(f"ser.readline() ",end="",flush=True)
if self.device_type == "moxa_tcp":
data = self.makefile_ReadLine.readline()
print(f" data = {data}",flush=True)
return data
elif self.device_type == "moxa_socat" or self.device_type == "local":
data = self.ser.readline()
print(f" data = {data}",flush=True)
return data
def read(self,nbChar=1):
print(f"ser.read() ",end="",flush=True)
if self.device_type == "moxa_tcp":
data = self.makefile_ReadLine.read(nbChar)
print(f" data = {data}",flush=True)
return data
elif self.device_type == "moxa_socat" or self.device_type == "local":
data = self.ser.read(nbChar)
print(f" data = {data}",flush=True)
return data
def flush(self):
if self.device_type == "moxa_socat" or self.device_type == "local":
self.ser.flush()
def close(self):
print(f"ser.close()")
if self.device_type == "moxa_tcp":
self.makefile_ReadLine.close()
self.socket.close()
elif self.device_type == "moxa_socat" or self.device_type == "local":
self.ser.close()
def open_socat(deviceName,timeout=1):
serial_config = hardware_config[deviceName]['serial_port']
tty_path_global = os.path.expanduser(serial_config['tty_path'])
print(f"com_port_url = {tty_path_global}")
try:
ser = serial.Serial(tty_path_global,serial_config["baud_rate"],timeout=timeout)
print(f"Success in open com port {tty_path_global}")
return ser
except serial.serialutil.SerialException:
print("Failed to open, we probably need to lunch socat tunnel..")
create_socat(deviceName)
try:
print("Retry open device after socat")
ser = serial.Serial(tty_path_global,serial_config["baud_rate"],timeout=timeout)
print(f"Success in open com port {tty_path_global}")
return ser
except serial.serialutil.SerialException:
print("Failed to open, probably incorrect configuration or device is off.")
return None
def create_socat(deviceName):
print(f"Try to open with socat the device {deviceName}")
serial_config = hardware_config[deviceName]['serial_port']
server_address = serial_config['address']
tcp_port = 4000 + serial_config['port_id']
tty_path_global = os.path.expanduser(serial_config['tty_path'])
print(f"socat.create() pipe {tty_path_global} <-> {server_address}:{tcp_port}")
socat_cmd = f"socat pty,link={tty_path_global},group-late=dialout,mode=660 tcp:{server_address}:{tcp_port} &"
print(f"call {socat_cmd}")
try:
retcode = subprocess.call(socat_cmd, shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
time.sleep(1)
def main():
print("test for serial Port")
ser = Serial("eShel")
ser.close()
ser = Serial("telescope")
ser.close()
ser = Serial("powerControl")
ser.close()
# ser = open_socat("eShel")
if __name__ == "__main__":
main()
| {
"content_hash": "9f750232057a379fb9116dcad2df139d",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 114,
"avg_line_length": 35.56115107913669,
"alnum_prop": 0.6073234877604694,
"repo_name": "tlemoult/spectroDb",
"id": "79ab434c4c30fdbf37234d13cb236747fe4614cf",
"size": "4943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myPythonLib/libobs/serialPort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1953"
},
{
"name": "C++",
"bytes": "10317"
},
{
"name": "CSS",
"bytes": "1051"
},
{
"name": "HTML",
"bytes": "4937"
},
{
"name": "JavaScript",
"bytes": "187028"
},
{
"name": "Makefile",
"bytes": "4190"
},
{
"name": "PHP",
"bytes": "30157"
},
{
"name": "Python",
"bytes": "275586"
},
{
"name": "Shell",
"bytes": "6410"
},
{
"name": "Tcl",
"bytes": "1219"
}
],
"symlink_target": ""
} |
from django import forms
class ContactForm(forms.Form):
contact_name = forms.CharField(max_length=100)
contact_email = forms.EmailField()
message_subject = forms.CharField(max_length=100)
message_body = forms.CharField(widget=forms.Textarea)
cc_myself = forms.BooleanField(required=False)
| {
"content_hash": "16dc10f6f82049c99ed5f7bbb5e40d57",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 57,
"avg_line_length": 34.55555555555556,
"alnum_prop": 0.7459807073954984,
"repo_name": "vacoj/MINDBODY-Django",
"id": "1f3903b3f8607b99c71c7622948dd07408ccda1f",
"size": "311",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/forms/contact_form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4686"
},
{
"name": "HTML",
"bytes": "43096"
},
{
"name": "JavaScript",
"bytes": "6232"
},
{
"name": "Python",
"bytes": "145902"
}
],
"symlink_target": ""
} |
from corehq.apps.domain.models import Domain
from corehq.apps.sms.api import send_sms_to_verified_number
def fallback_handler(v, text, msg):
domain_obj = Domain.get_by_name(v.domain, strict=True)
if domain_obj.use_default_sms_response and domain_obj.default_sms_response:
send_sms_to_verified_number(v, domain_obj.default_sms_response)
return True
| {
"content_hash": "0a4dbdb455f9b8a16d5c7e006995cb5e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 41.111111111111114,
"alnum_prop": 0.7486486486486487,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "a831770ad34d6e935cbcbe4e5227fd97a9930c9d",
"size": "370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/sms/handlers/fallback.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
"""Add CCS admin data controller role
Revision ID: 1270
Revises: 1260
Create Date: 2018-11-22 14:51:03.013362
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1270'
down_revision = '1260'
def upgrade():
op.execute("COMMIT") # See: http://stackoverflow.com/a/30910417/15720
op.execute("ALTER TYPE user_roles_enum ADD VALUE IF NOT EXISTS 'admin-ccs-data-controller' AFTER 'admin-framework-manager';")
def downgrade():
# Cannot remove user role value
pass
| {
"content_hash": "a3872ff2ae89c52acc123cdea7e6cdfb",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 129,
"avg_line_length": 22.208333333333332,
"alnum_prop": 0.7204502814258912,
"repo_name": "alphagov/digitalmarketplace-api",
"id": "4b24762f42607c649803e92254b48c806f62a36a",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/1270_add_ccs_admin_data_controller_role.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "4777"
},
{
"name": "Makefile",
"bytes": "2140"
},
{
"name": "Mako",
"bytes": "414"
},
{
"name": "Nix",
"bytes": "3614"
},
{
"name": "Python",
"bytes": "1536454"
},
{
"name": "Shell",
"bytes": "973"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(name='PyFL593FL',
version='0.0.1',
author='Ronny Eichler',
author_email='[email protected]',
url='https://github.com/wonkoderverstaendige/PyFL593FL',
download_url='https://github.com/wonkoderverstaendige/PyFL593FL',
description='Python interface to the Team Wavelength FL593FL laser diode driver evaluation board',
long_description='Python interface to the Team Wavelength FL593FL laser diode driver evaluation board',
packages = find_packages(),
include_package_data = True,
package_data = {
'': ['*.txt', '*.rst'],
'PyFL593FL': ['data/*.html', 'data/*.css'],
},
exclude_package_data = { '': ['README.md'] },
scripts = ['bin/my_program'],
keywords='hardware serial laser-diode driver',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
],
#setup_requires = ['python-stdeb', 'fakeroot', 'python-all'],
install_requires = ['setuptools', 'pyserial', 'PyUSB'],
extra_requires = {
'gui': ['PyQt4'],
)
| {
"content_hash": "b66b3770f49a1c8048e325d83134e4b6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 109,
"avg_line_length": 39.48717948717949,
"alnum_prop": 0.5733766233766234,
"repo_name": "wonkoderverstaendige/PyFL593FL",
"id": "67115f5628abbaaac3a812457a8b9a47d7f8e1b5",
"size": "1578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "182513"
},
{
"name": "HTML",
"bytes": "23466"
},
{
"name": "JavaScript",
"bytes": "262516"
},
{
"name": "Makefile",
"bytes": "1245"
},
{
"name": "Python",
"bytes": "137200"
}
],
"symlink_target": ""
} |
<<<<<<< HEAD
<<<<<<< HEAD
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import support
#
# No test cases for outstanding bugs at the moment.
#
if __name__ == "__main__":
unittest.main()
=======
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import support
#
# No test cases for outstanding bugs at the moment.
#
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import support
#
# No test cases for outstanding bugs at the moment.
#
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| {
"content_hash": "76fb4ca1a9b1d13d363231afa859251d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 68,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.6944,
"repo_name": "ArcherSys/ArcherSys",
"id": "ad86e71ed0d1d57ae601feac3dbd7fd507a6a29d",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/outstanding_bugs.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
'''Un acortador de URLs pero que permite:
* Editar adonde apunta el atajo más tarde
* Eliminar atajos
* Definir tests para saber si el atajo es válido
'''
import os
import string
import datetime
from twill.commands import go, code, find, notfind, title
def minitwill(url, script):
'''Dada una URL y un script en una versión limitada
de twill, ejecuta ese script.
Apenas una línea falla, devuelve False.
Si todas tienen éxito, devuelve True.
Ejemplos:
>>> minitwill('http://google.com','code 200')
==> at http://www.google.com.ar/
True
>>> minitwill('http://google.com','title bing')
==> at http://www.google.com.ar/
title is 'Google'.
False
'''
go (url)
for line in script.splitlines():
cmd,arg = line.split(' ',1)
try:
if cmd in ['code','find','notfind','title']:
r = globals()[cmd](arg)
except:
return False
return True
# Usamos storm para almacenar los datos
from storm.locals import *
# FIXME: tengo que hacer más consistentes los nombres
# de los métodos.
class Atajo(object):
'''Representa una relación slug <=> URL
Miembros:
id = Único, creciente, entero (primary key)
url = la URL original
test = un test de validez de la URL
user = el dueño del atajo
activo = Si este atajo está activo o no.
Nunca hay que borrarlos, sino el ID puede volver
atrás y se "recicla" una URL. ¡Malo, malo, malo!
status = Resultado del último test (bien/mal)
ultimo = Fecha/hora del último test
'''
# Hacer que los datos se guarden via Storm
__storm_table__ = "atajo"
id = Int(primary=True)
url = Unicode()
test = Unicode()
user = Unicode()
activo = Bool()
status = Bool()
ultimo = DateTime()
def __init__(self, url, user, test=''):
'''Exigimos la URL y el usuario, test es opcional,
_id es automático.'''
# Hace falta crear esto?
r = self.store.find(Atajo, user = user, url = url)
self.url = url
self.user = user
self.activo = True
# Test por default, verifica que la página exista.
self.test = u'code 200'
if r.count():
# FIXME: esto creo que es una race condition
# Existe la misma URL para el mismo usuario,
# reciclamos el id y el test, pero activa.
viejo = r.one()
Atajo.store.remove(viejo)
self.id = viejo.id
self.test = viejo.test
self.store.add(self)
# Autosave/flush/commit a la base de datos
self.save()
def save(self):
'''Método de conveniencia'''
Atajo.store.flush()
Atajo.store.commit()
@classmethod
def init_db(cls):
# Creamos una base SQLite
if not os.path.exists('pyurl.sqlite'):
cls.database = create_database("sqlite:///pyurl.sqlite")
cls.store = Store (cls.database)
try:
# Creamos la tabla
cls.store.execute ('''
CREATE TABLE atajo (
id INTEGER PRIMARY KEY,
url VARCHAR,
test VARCHAR,
user VARCHAR,
activo TINYINT,
status TINYINT,
ultimo TIMESTAMP
) ''' )
cls.store.flush()
cls.store.commit()
except:
pass
else:
cls.database = create_database("sqlite:///pyurl.sqlite")
cls.store = Store (cls.database)
# Caracteres válidos en un atajo de URL
validos = string.letters + string.digits
def slug(self):
'''Devuelve el slug correspondiente al
ID de este atajo
Básicamente un slug es un número en base 62, representado usando
a-zA-Z0-9 como "dígitos", y dado vuelta (más significativo
a la derecha.
Ejemplo:
100000 => '4aA'
100001 => '5aA'
'''
s = ''
n = self.id
while n:
s += self.validos[n%62]
n = n // 62
return s
@classmethod
# FIXME: no estoy feliz con esta API
def get(cls, slug = None, user = None, url = None):
''' Dado un slug, devuelve el atajo correspondiente.
Dado un usuario:
Si url es None, devuelve la lista de sus atajos
Si url no es None , devuelve *ese* atajo
'''
if slug is not None:
i = 0
for p,l in enumerate(slug):
i += 62 ** p * cls.validos.index(l)
return cls.store.find(cls, id = i, activo = True).one()
if user is not None:
if url is None:
return cls.store.find(cls, user = user, activo = True)
else:
return cls.store.find(cls, user = user,
url = url, activo = True).one()
def delete(self):
'''Eliminar este objeto de la base de datos'''
self.activo=False
self.save()
def run_test(self):
'''Correr el test con minitwill y almacenar
el resultado'''
self.status = minitwill(self.url, self.test)
self.ultimo = datetime.datetime.now()
self.save()
# Usamos bottle para hacer el sitio
import bottle
# Middlewares
from beaker.middleware import SessionMiddleware
from authkit.authenticate import middleware
from paste.auth.auth_tkt import AuthTKTMiddleware
@bottle.route('/logout')
def logout():
bottle.request.environ['paste.auth_tkt.logout_user']()
if 'REMOTE_USER' in bottle.request.environ:
del bottle.request.environ['REMOTE_USER']
bottle.redirect('/')
@bottle.route('/')
@bottle.view('usuario.tpl')
def alta():
"""Crea un nuevo slug"""
# Requerimos que el usuario esté autenticado.
if not 'REMOTE_USER' in bottle.request.environ:
bottle.abort(401, "Sorry, access denied.")
usuario = bottle.request.environ['REMOTE_USER'].decode('utf8')
# Data va a contener todo lo que el template
# requiere para hacer la página
data ={}
# Esto probablemente debería obtenerse de una
# configuración
data['baseurl'] = 'http://localhost:8080/'
# Si tenemos un parámetro URL, estamos en esta
# funcion porque el usuario envió una URL a acortar.
if 'url' in bottle.request.GET:
# La acortamos
url = bottle.request.GET['url'].decode('utf8')
a = Atajo(url=url, user=usuario)
data['short'] = a.slug()
data['url'] = url
# La probamos
a.run_test()
# Mensaje para el usuario de que el acortamiento
# tuvo éxito.
data['mensaje'] = u'''La URL <a href="%(url)s">%(url)s</a>
se convirtió en:
<a href="%(baseurl)s%(short)s">%(baseurl)s%(short)s</a>'''%data
# Clase CSS que muestra las cosas como buenas
data['clasemensaje']='success'
else:
# No se acortó nada, no hay nada para mostrar.
data['url']=None
data['short']=None
data['mensaje']=None
# Lista de atajos del usuario.
data ['atajos'] = Atajo.get (user = usuario)
# Crear la página con esos datos.
return data
@bottle.route('/:slug/edit')
@bottle.view('atajo.tpl')
def editar(slug):
"""Edita un slug"""
if not 'REMOTE_USER' in bottle.request.environ:
bottle.abort(401, "Sorry, access denied.")
usuario = bottle.request.environ['REMOTE_USER'].decode('utf8')
# Solo el dueño de un atajo puede editarlo
a = Atajo.get(slug)
# Atajo no existe o no sos el dueño
if not a or a.user != usuario:
bottle.abort(404, 'El atajo no existe')
if 'url' in bottle.request.GET:
# El usuario mandó el form
a.url = bottle.request.GET['url'].decode('utf-8')
a.activo = 'activo' in bottle.request.GET
a.test = bottle.request.GET['test'].decode('utf-8')
a.save()
bottle.redirect('/')
return {'atajo':a,
'mensaje':'',
}
@bottle.route('/:slug/del')
def borrar(slug):
"""Elimina un slug"""
if not 'REMOTE_USER' in bottle.request.environ:
bottle.abort(401, "Sorry, access denied.")
usuario = bottle.request.environ['REMOTE_USER'].decode('utf8')
# Solo el dueño de un atajo puede borrarlo
a = Atajo.get(slug)
if a and a.user == usuario:
a.delete()
# FIXME: pasar un mensaje en la sesión
bottle.redirect('/')
@bottle.route('/:slug/test')
def run_test(slug):
"""Corre el test correspondiente a un atajo"""
if not 'REMOTE_USER' in bottle.request.environ:
bottle.abort(401, "Sorry, access denied.")
usuario = bottle.request.environ['REMOTE_USER'].decode('utf8')
# Solo el dueño de un atajo puede probarlo
a = Atajo.get(slug)
if a and a.user == usuario:
a.run_test()
# FIXME: pasar un mensaje en la sesión
bottle.redirect('/')
# Un slug está formado sólo por estos caracteres
@bottle.route('/(?P<slug>[a-zA-Z0-9]+)')
def redir(slug):
"""Redirigir un slug"""
# Buscamos el atajo correspondiente
a = Atajo.get(slug=slug)
if not a:
bottle.abort(404, 'El atajo no existe')
bottle.redirect(a.url)
# Lo de /:filename es para favicon.ico :-)
@bottle.route('/:filename')
@bottle.route('/static/:filename')
def static_file(filename):
"""Archivos estáticos (CSS etc)"""
bottle.send_file(filename, root='./static/')
if __name__=='__main__':
"""Ejecutar con el server de debug de bottle"""
bottle.debug(True)
app = bottle.default_app()
# Mostrar excepciones mientras desarrollamos
app.catchall = False
app = middleware(app,
enable=True,
setup_method='openid',
openid_store_type='file',
openid_template_file=os.path.join(os.getcwd(),
'views','invitado.tpl'),
openid_store_config=os.getcwd(),
openid_path_signedin='/')
app = AuthTKTMiddleware(SessionMiddleware(app),
'some auth ticket secret');
# Inicializar DB
Atajo.init_db()
# Ejecutar aplicación
bottle.run(app)
| {
"content_hash": "e8db31dd295f7dcf8bf409c5dec05008",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 72,
"avg_line_length": 29.05982905982906,
"alnum_prop": 0.5812745098039216,
"repo_name": "rst2pdf/rst2pdf",
"id": "038e742c818ca0d0bd85869c42f8467c845a1886",
"size": "10268",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rst2pdf/tests/input/pyurl3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "485883"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
} |
"""Iris Classification Model.
Iris problem was first introduced by Fisher's 1936 paper,
The Use of Multiple Measurements in Taxonomic Problems.
This is a classification model with 3 possible output classes and 4 numeric
input features. One of the classes is linearly separable, but the other two
are not.
This sample creates a model to solve the problem using a small neural net
with a single hidden layer.
"""
import json
import os
import tensorflow as tf
import google.cloud.ml.features as features
def runs_on_cloud():
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
return env.get('task', None)
class IrisFeatures(object):
csv_columns = ('key', 'species', 'sepal_length', 'sepal_width',
'petal_length', 'petal_width')
key = features.key('key')
species = features.target('species').discrete()
measurements = [
features.numeric('sepal_length'), features.numeric('sepal_width'),
features.numeric('petal_length'), features.numeric('petal_width')
]
def create_inputs(metadata, input_data=None):
with tf.name_scope('inputs'):
if input_data is None:
input_data = tf.placeholder(tf.string, name='input', shape=(None,))
parsed = features.FeatureMetadata.parse_features(metadata, input_data)
return (input_data, parsed['measurements'], tf.squeeze(parsed['species']),
tf.identity(parsed['key']))
| {
"content_hash": "d8a4f05461916a6514f85ad6f7cba7be",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 30.711111111111112,
"alnum_prop": 0.7069464544138929,
"repo_name": "obulpathi/cloud",
"id": "b7220dffa06d33c5b34a8f1798957fc373d4ee96",
"size": "1979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml/tensorflow/iris/trainer/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "261"
},
{
"name": "Python",
"bytes": "326566"
},
{
"name": "Shell",
"bytes": "2024"
}
],
"symlink_target": ""
} |
import traceback
import unittest
from tt.tables import TruthTable
class TruthTableTestCase(unittest.TestCase):
"""An extended TestCase with helpers for testing truth tables."""
def helper_test_truth_table(self, expr, expected_table_str=None, **kwargs):
"""Helper to test the creation of truth tables.
This helper will fill up a table completely and compare its ``__str__``
representation with the passed expected string.
:param expr: The value to pass to the ``TruthTable`` constructor.
:type expr: BooleanExpression or str
:param expected_table_str: The expected string representation of the
table.
:type expected_table_str: str
:param kwargs: Keyword args to pass to the ``TruthTable`` constructor.
"""
t = TruthTable(expr, **kwargs)
self.assertEqual(expected_table_str, str(t))
def helper_test_truth_table_fill(self, expr, expected_table_str=None,
init_kwargs={}, **kwargs):
"""Helper to test filling a truth table.
:param expr: The value to pass to the ``TruthTable`` constructor.
:type expr: BooleanExpression or str
:param expected_table_str: The expected string representation of the
table.
:type expected_table_str: str
:param init_kwargs: A dict to pass as the kwargs to the ``TruthTable``
constructor.
:type init_kwargs: Dict
:param kwargs: Keyword args to pass to the fill method.
"""
t = TruthTable(expr, fill_all=False, **init_kwargs)
t.fill(**kwargs)
self.assertEqual(expected_table_str, str(t))
def helper_test_truth_table_raises(self, expr, expected_exc_type=None,
**kwargs):
"""Helper for testing exception conditions for TruthTable.
:param expr: The value to pass to the ``TruthTable`` constructor.
:type expr: BooleanExpression or str
:param expected_exc_type: The exception type expected to be raised.
:type expected_exc_type: Exception
:param kwargs: Keyword args to pass to the ``TruthTable`` constructor.
"""
did_catch = False
try:
TruthTable(expr, **kwargs)
except expected_exc_type:
did_catch = True
except Exception as e:
traceback.print_exc()
self.fail('Received exception of type ' + type(e).__name__ +
' but was expecting type ' + expected_exc_type.__name__ +
'.')
did_catch = True
if not did_catch:
self.fail('No exception thrown.')
def helper_test_truth_table_fill_raises(self, expr, expected_exc_type=None,
**kwargs):
"""Helper for testing exception conditions when filling a table.
:param expr: The value to pass to the ``TruthTable`` constructor.
:type expr: BooleanExpression or str
:param expected_exc_type: The exception type expected to be raised.
:type expected_exc_type: Exception
:param kwargs: Keyword args to pass to the ``TruthTable`` constructor.
"""
did_catch = False
try:
t = TruthTable(expr, fill_all=False)
t.fill(**kwargs)
except expected_exc_type:
did_catch = True
except Exception as e:
traceback.print_exc()
self.fail('Received exception of type ' + type(e).__name__ +
' but was expecting type ' + expected_exc_type.__name__ +
'.')
did_catch = True
if not did_catch:
self.fail('No exception thrown.')
| {
"content_hash": "db9cfc5f9185a951848692c571a27ac8",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 34.68807339449541,
"alnum_prop": 0.5866172970113727,
"repo_name": "welchbj/tt",
"id": "843293d6193a1cd6507b23dd9f24e5fff5bdad34",
"size": "3781",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tt/tests/unit/tables/_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "833"
},
{
"name": "C",
"bytes": "213920"
},
{
"name": "Python",
"bytes": "416197"
},
{
"name": "Shell",
"bytes": "1255"
}
],
"symlink_target": ""
} |
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import linecache
import re
import sys
import threading
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _convert_stack(stack):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 4-tuples, (filename, lineno, name, frame_globals).
Returns:
A list of n 4-tuples (filename, lineno, name, code), where the code tuple
element is calculated from the corresponding elements of the input tuple.
"""
ret = []
for filename, lineno, name, frame_globals in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
ret.append((filename, lineno, name, line))
return ret
# pylint: disable=line-too-long
def _extract_stack():
"""A lightweight re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Returns:
A list of 4-tuples (filename, lineno, name, frame_globals) corresponding to
the call stack of the current thread.
"""
# pylint: enable=line-too-long
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
ret.append((filename, lineno, name, frame_globals))
f = f.f_back
ret.reverse()
return ret
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property")
except AttributeError:
raise TypeError("Type %s does not define a `name` property")
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property")
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property")
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
self._shape = tensor_shape.unknown_shape()
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
# Attributes used for C++ shape inference. Not inspected, only forwarded.
self._handle_shape = tensor_shape_pb2.TensorShapeProto()
self._handle_dtype = types_pb2.DT_INVALID
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
return "%s:%d" % (self._op.name, self._value_index)
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
@{tf.TensorShape}
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
return self._shape
def _shape_as_list(self):
if self._shape.ndims is not None:
return [dim.value for dim in self._shape.dims]
else:
return None
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
Args:
shape: A `TensorShape` representing the shape of this tensor.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
self._shape = self._shape.merge_with(shape)
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
return self._consumers
def _add_consumer(self, consumer):
"""Add a consumer to this tensor.
Args:
consumer: an Operation.
Raises:
TypeError: if the consumer is not an Operation.
"""
if not isinstance(consumer, Operation):
raise TypeError("Consumer must be an Operation: %s" % consumer)
self._consumers.append(consumer)
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" % self.get_shape())
if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
return id(self) == id(other)
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __iter__(self):
"""Dummy method to prevent iteration. Do not call.
NOTE(mrry): If we register __getitem__ as an overloaded operator,
Python will valiantly attempt to iterate over the Tensor from 0 to
infinity. Declaring this method prevents this unintended
behavior.
Raises:
TypeError: when invoked.
"""
raise TypeError("'Tensor' object is not iterable.")
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run} for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
% (dtype.name, t.dtype.name, str(t)))
return t
_tensor_conversion_func_registry = {
0: [(Tensor, _TensorTensorConversionFunction)]}
register_dense_tensor_like_type(Tensor)
def convert_to_tensor(value,
dtype=None,
name=None,
preferred_dtype=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
An `Output` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None):
"""Converts the given `value` to an `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
This function can be useful when composing a new operation in Python
All standard Python op constructors apply this function to each of their
Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
lists, and scalars in addition to `Tensor` objects.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the mutable view of Variables, if applicable.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
error_prefix = "" if name is None else "%s: " % name
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
for _, funcs_at_priority in sorted(_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, Tensor):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r"
% (error_prefix, conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s"
% (error_prefix, conversion_func, base_type,
dtype.name, ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered."
% (error_prefix, value, type(value)))
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype))
return ret
def convert_n_to_tensor(values,
dtype=None,
name=None,
preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,
as_ref=False):
"""Converts the given object to an `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
% (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(value,
dtype=dtype,
name=name,
as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None,
name=None, as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`
`IndexedSlices`.
name: (Optional.) A name prefix to used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`
`IndexedSlices`.
name: (Optional.) A name prefix to used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
def register_tensor_conversion_function(base_type, conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values
run earlier than conversion functions with larger priority values.
Defaults to 100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple)
and all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. @{tf.gather}).
Contrast this representation with
@{tf.SparseTensor},
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
_get_graph_from_inputs([values, indices, dense_shape])
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values,
(", dense_shape=%s" % self._dense_shape)
if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
if isinstance(dev_spec, pydev.DeviceSpec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None):
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string.
Value for the "device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
@{tf.matmul})
or @{tf.Graph.create_op}.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
@{tf.Session.run}.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
def __init__(self, node_def, g, inputs=None, output_types=None,
control_inputs=None, input_types=None, original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
`op`, and `device`. The `input` attribute is irrelevant here
as it will be computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the
`Tensors` computed by this operation. The length of this list indicates
the number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a
control dependency.
input_types: List of `DType` objects representing the
types of the tensors accepted by the `Operation`. By default
uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
reference-typed inputs must specify these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
op type that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
if not isinstance(node_def, node_def_pb2.NodeDef):
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._node_def = copy.deepcopy(node_def)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
self._inputs = list(inputs) # Defensive copy.
for a in self._inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
# Mark that we consume the inputs.
a._add_consumer(self) # pylint: disable=protected-access
if output_types is None:
output_types = []
self._output_types = output_types
self._outputs = [Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)]
if input_types is None:
input_types = [i.dtype.base_dtype for i in self._inputs]
else:
if not all(x.is_compatible_with(i.dtype)
for i, x in zip(self._inputs, input_types)):
raise TypeError("Inputs are not compatible with input types")
self._input_types = input_types
# Build the list of control inputs.
self._control_inputs = []
if control_inputs:
for c in control_inputs:
c_op = None
if isinstance(c, Operation):
c_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
c_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
self._control_inputs.append(c_op)
self._original_op = original_op
self._op_def = op_def
self._traceback = _extract_stack()
# Add this op to the current control flow context:
self._control_flow_context = g._get_control_flow_context()
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
# NOTE(keveman): Control flow context's AddOp could be creating new ops and
# setting op.inputs[index] = new_op. Thus the new ops' id could be larger
# than this op's id even though this op depend on them. Therefore, delaying
# assigning id to this op until all ops this could be dependent on are
# created.
self._id_value = self._graph._next_id() # pylint: disable=protected-access
self._recompute_node_def()
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" %
self._node_def.name)]
if "_class" not in self._node_def.attr:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [class_name
for class_name in self.get_attr("_class")
if class_name.startswith(b"loc:@")]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, context):
"""Sets the current control flow context of this op.
Args:
context: a context object.
"""
self._control_flow_context = context
@property
def name(self):
"""The full name of this operation."""
return self._node_def.name
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return self._node_def.device
def _set_device(self, device):
"""Set the device of this operation.
Args:
device: string or device.. The device to set.
"""
self._node_def.device = _device_string(device)
def _add_input(self, tensor, dtype=None):
"""Add a new input to this operation.
Args:
tensor: the Tensor to add as an input.
dtype: tf.DType: type of the input; defaults to
the tensor's dtype.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
if dtype is None:
dtype = tensor.dtype
else:
dtype = dtypes.as_dtype(dtype)
if not dtype.is_compatible_with(tensor.dtype):
raise TypeError(
"Cannot convert a tensor of type %s to an input of type %s"
% (tensor.dtype.name, dtype.name))
self._inputs.append(tensor)
self._input_types.append(dtype)
tensor._add_consumer(self) # pylint: disable=protected-access
self._recompute_node_def()
def _update_input(self, index, tensor, dtype=None):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
dtype: tf.DType: type of the input; defaults to
the tensor's dtype.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
if dtype is None:
dtype = tensor.dtype
else:
dtype = dtypes.as_dtype(dtype)
if not dtype.is_compatible_with(tensor.dtype):
raise TypeError(
"Cannot convert a tensor of type %s to an input of type %s"
% (tensor.dtype.name, dtype.name))
self._inputs[index].consumers().remove(self)
self._inputs[index] = tensor
self._input_types[index] = dtype
tensor._add_consumer(self) # pylint: disable=protected-access
self._recompute_node_def()
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
if ops:
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
_assert_same_graph(self, op)
self._control_inputs.append(op)
self._recompute_node_def()
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
self._add_control_inputs([op])
# Methods below are used when building the NodeDef and Graph proto.
def _recompute_node_def(self):
del self._node_def.input[:]
self._node_def.input.extend([t._as_node_def_input() for t in self._inputs])
if self._control_inputs:
self._node_def.input.extend(["^%s" % op.name for op in
self._control_inputs])
def __str__(self):
return str(self._node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, op):
self._op = op
def __iter__(self):
return iter(self._op._inputs)
def __len__(self):
return len(self._op._inputs)
def __bool__(self):
return bool(self._op._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._op._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
return Operation._InputList(self)
@property
def _input_dtypes(self):
return self._input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
return self._control_inputs
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return self._node_def.op
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
"""Returns a serialized `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
return self._node_def
@property
def op_def(self):
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
return self._op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return _convert_stack(self._traceback)
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor"]
if name not in self._node_def.attr:
raise ValueError("No attr named '" + name + "' in " +
str(self._node_def))
x = self._node_def.attr[name]
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return []
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
return list(getattr(x.list, f))
return []
else:
for f in fields:
if x.HasField(f):
return getattr(x, f)
assert False, "Unsupported field type in " + str(x)
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run}
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
def NotDifferentiable(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.NotDifferentiable("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Alias for the old name, will be eventually removed.
NoGradient = NotDifferentiable
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs: return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used. Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shapes_for_outputs(op):
"""Uses the registered shape functions to set the shapes for op's outputs."""
try:
shape_func = _shape_registry.lookup(op.type)
except LookupError:
try:
shape_func = _default_shape_function_registry.lookup(op.type)
except LookupError:
shape_func = _call_cpp_shape_fn_and_require_op
shapes = shape_func(op)
if shapes is None:
raise RuntimeError(
"Shape function for op %s did not return any shapes" % op)
elif isinstance(shapes, dict):
# Returned by call_cpp_shape_fn
shapes_dict = shapes
shapes = shapes_dict["shapes"]
handle_shapes = shapes_dict["handle_shapes"]
handle_dtypes = shapes_dict["handle_dtypes"]
for output, handle_shape, handle_dtype in zip(op.outputs, handle_shapes, handle_dtypes):
# pylint: disable=protected-access
output._handle_shape = handle_shape
output._handle_dtype = handle_dtype
# pylint: enable=protected-access
if len(op.outputs) != len(shapes):
raise RuntimeError(
"Shape function for op %s returned %d shapes but expected %d %s %s" %
(op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
for output, s in zip(op.outputs, shapes):
output.set_shape(s)
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s.",
self.statistic_type, other.statistic_type)
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since the statistics is counted on a per-op basis. It is not suitable for
model parameters (capacity), which is expected to be counted only once, even
if it is shared by multiple ops. (e.g. RNN)
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if name[-1] == "/" else name
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
@{tf.Graph.as_default}
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects the core state that may be accessed by multiple readers.
# Only state that can be returned via public accessors (`as_graph_def()`,
# `get_operations()`, `as_graph_element()`, `get_collection()`, and
# `get_collection_ref()`) is by the lock. Thread-safety is provided on a
# best-effort basis to support buggy programs, and is not guaranteed by the
# public `tf.Graph` API.
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads
self._lock = threading.Lock()
self._nodes_by_id = dict() # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = dict() # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Current name stack: uniquified names
self._name_stack = ""
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
# Functions that will be applied to choose a device if none is specified.
self._device_function_stack = []
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend of the union of all of the nodes in the stack.
self._control_dependencies_stack = []
# Arbritrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops
self._colocation_stack = []
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Resource container.
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
op: the Operator or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
@{tf.Graph.graph_def_versions}.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
return self._graph_def_versions
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a @{tf.train.QueueRunner}.
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`. Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, context):
"""Sets the current control flow context.
Args:
context: a context object.
"""
self._control_flow_context = context
def _as_graph_def(self, from_version=None, add_shapes=False):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
with self._lock:
graph = graph_pb2.GraphDef()
graph.versions.CopyFrom(self._graph_def_versions)
bytesize = 0
for op_id in sorted(self._nodes_by_id):
op = self._nodes_by_id[op_id]
if from_version is None or op_id > from_version:
graph.node.extend([op.node_def])
if op.outputs and add_shapes:
assert "_output_shapes" not in graph.node[-1].attr
graph.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in op.outputs])
bytesize += op.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
if self._functions:
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph.library.gradient.extend([grad_def])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return name in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(name, None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call to the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
previous = self._functions.get(name, None)
if previous:
raise ValueError("Another function is already defined with that name")
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (
function.python_grad_func is not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
self._functions[name] = function
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
def create_op(self, op_type, inputs, dtypes,
input_types=None, name=None, attrs=None, op_def=None,
compute_shapes=True, compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) If True, shape inference will be performed
to compute the shapes of the outputs.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
# If a names ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = _name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
if key not in node_def.attr:
if callable(value):
value = value(node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
node_def.attr[key].CopyFrom(value)
# Apply a kernel label if one has been specified for this op_type.
try:
kernel_label = self._op_to_kernel_label_map[op_type]
node_def.attr["_kernel"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op_type for gradients if one has been
# specified for this op_type.
try:
mapped_op_type = self._gradient_override_map[op_type]
node_def.attr["_gradient_op_type"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
control_inputs = self._control_dependencies_for_inputs(inputs)
ret = Operation(node_def, self, inputs=inputs, output_types=dtypes,
control_inputs=control_inputs, input_types=input_types,
original_op=self._default_original_op, op_def=op_def)
if compute_shapes:
set_shapes_for_outputs(ret)
self._add_op(ret)
self._record_op_seen_by_control_dependencies(ret)
if compute_device:
self._apply_device_functions(ret)
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack:
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to
# provide consistency between the device and the colocation
# property.
if ret.device and ret.device != colocation_op.device:
logging.warning("Tried to colocate %s with an op %s that had "
"a different device: %s vs %s. "
"Ignoring colocation property.",
name, colocation_op.name,
ret.device, colocation_op.device)
else:
ret._set_device(colocation_op.device)
all_colocation_groups = sorted(set(all_colocation_groups))
ret.node_def.attr["_class"].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if (self._container and
op_type in self._registered_ops and
self._registered_ops[op_type].is_stateful and
"container" in ret.node_def.attr and
not ret.node_def.attr["container"].s):
ret.node_def.attr["container"].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(self._container)))
return ret
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs."
% (repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s."
% (repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s."
% (repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s."
% (type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s."
% type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s."
% type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly. Use this method with the `with` keyword
to specify that ops created within the scope of a block should be
added to this graph.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
"""
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
"""
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()` which always returns the
actual collection list if it exists in that it returns a new list each time
it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied and
the choice or `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
"""
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
return []
if scope is None:
return list(coll_list)
else:
c = []
regex = re.compile(scope)
for item in coll_list:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
try:
self._default_original_op = op
yield
finally:
self._default_original_op = old_original_op
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def name_scope(self, name):
r"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
try:
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name and name[-1] == "/":
new_stack = _name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
i = self._names_in_use.get(name, 0)
# Increment the number for "name".
if mark_as_used:
self._names_in_use[name] = i + 1
if i > 0:
base_name = name
# Make sure the composed name is not already used.
while name in self._names_in_use:
name = "%s_%d" % (base_name, i)
i += 1
# Mark the composed name as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name] = 1
return name
@contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within
the context, rather than applying all colocation properties
on the stack. If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError(
"Trying to reset colocation (op is None) but "
"ignore_existing is not True")
if op is not None and not isinstance(op, Operation):
# We always want to colocate with the reference op.
op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = []
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = []
if op is not None:
self._colocation_stack.append(op)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
@contextlib.contextmanager
def device(self, device_name_or_function):
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/gpu:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/gpu:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
"""
if (device_name_or_function is not None
and not callable(device_name_or_function)):
device_function = pydev.merge_device(device_name_or_function)
else:
device_function = device_name_or_function
try:
self._device_function_stack.append(device_function)
yield
finally:
self._device_function_stack.pop()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in reverse order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
for device_function in reversed(self._device_function_stack):
if device_function is None:
break
op._set_device(device_function(op))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# be created in the "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# be placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
try:
self._container = container_name
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition
to the current control dependencies. None to indicate that
the dependencies should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs = []
self._new_stack = True
else:
self._control_inputs = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_tensors):
"""For an op that takes `input_tensors` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_tensors: The direct data dependencies for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
input_ops = set([t.op for t in input_tensors])
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on i.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to
AttrValue protocol buffers or None.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to
kernel label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types)
and isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(s_2) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op
type strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types)
and isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
@{tf.Graph.device}
for more details.
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
"""
return get_default_graph().device(device_name_or_function)
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def colocate_with(op, ignore_existing=False):
return get_default_graph().colocate_with(op, ignore_existing)
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See @{tf.Graph.control_dependencies}
for more details.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
try:
self.stack.append(default)
yield default
finally:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects"
% type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack()
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or set
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
_default_graph_stack = _DefaultGraphStack()
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
"""
_default_graph_stack.reset()
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError(
"%s must be from the same graph as %s." % (item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
graph_element = None
if isinstance(op_input, (Operation, _TensorLike)):
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError(
"%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
@{tf.global_variables}
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporarily variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
@{tf.trainable_variables}
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
@{tf.summary.merge_all}
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
@{tf.train.start_queue_runners}
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
@{tf.moving_average_variables}
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
* `WEIGHTS`: weights inside neural network layers
* `BIASES`: biases inside neural network layers
* `ACTIVATIONS`: activations of neural network layers
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
@decorator_utils.classproperty
def VARIABLES(cls): # pylint: disable=no-self-argument
logging.warning("VARIABLES collection name is deprecated, "
"please use GLOBAL_VARIABLES instead; "
"VARIABLES will be removed after 2017-03-02.")
return cls.GLOBAL_VARIABLES
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See @{tf.Graph.add_to_collection}
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
"""
get_default_graph().add_to_collection(name, value)
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See @{tf.Graph.add_to_collections}
for more details.
Args:
names: The key for the collections. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
get_default_graph().add_to_collections(names, value)
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See @{tf.Graph.get_collection_ref}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
"""
return get_default_graph().get_collection_ref(key)
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See @{tf.Graph.get_collection}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied and
the choice or `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def name_scope(name, default_name=None, values=None):
"""Returns a context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
@{tf.Graph.name_scope}
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Returns:
A context manager for use in defining Python ops. Yields the name scope.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
n = default_name if name is None else name
if n is None and values is not None:
# We only raise an error if values is not None (provided) because currently
# tf.name_scope(None) (values=None then) is sometimes used as an idiom
# to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided." % (
name, default_name))
if values is None:
values = []
g = _get_graph_from_inputs(values)
with g.as_default(), g.name_scope(n) as scope:
yield scope
# pylint: enable=g-doc-return-or-yield
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
else:
return name
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name, proto_type=None, to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
returns the object..
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
`variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(
("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
register_tensor_conversion_function(Operation, _operation_conversion_error)
| {
"content_hash": "cd5d1e432a5ad236d277a627fb9d34df",
"timestamp": "",
"source": "github",
"line_count": 4262,
"max_line_length": 98,
"avg_line_length": 35.619427498826845,
"alnum_prop": 0.6596205783545221,
"repo_name": "AsimmHirani/ISpyPi",
"id": "5669878b7417bb308e0bf0f7447814603ecd9b76",
"size": "152500",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/tensorflow-master/tensorflow/python/framework/ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "171804"
},
{
"name": "C++",
"bytes": "20840175"
},
{
"name": "CMake",
"bytes": "120545"
},
{
"name": "CSS",
"bytes": "1644"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "Go",
"bytes": "775176"
},
{
"name": "HTML",
"bytes": "555072"
},
{
"name": "Java",
"bytes": "271861"
},
{
"name": "JavaScript",
"bytes": "17466"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32953"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "189498"
},
{
"name": "Python",
"bytes": "17362599"
},
{
"name": "Shell",
"bytes": "307916"
},
{
"name": "TypeScript",
"bytes": "772208"
}
],
"symlink_target": ""
} |
with open("Brown-Retagged.txt") as fp:
for line in fp:
line = line.strip().split()
for tuple in line:
word, _, pos = tuple.partition('_')
print '{}\t{}'.format(word, pos)
print '' | {
"content_hash": "177d14688774e474b40a1f4b0dfa4b23",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 33,
"alnum_prop": 0.5021645021645021,
"repo_name": "Sentimentron/Dracula",
"id": "0ae1dd859187db590a1e4018565b35497599d42b",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/2016-04-16",
"path": "Data/convert_brown.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "95020"
},
{
"name": "Shell",
"bytes": "747"
}
],
"symlink_target": ""
} |
from ..pakbase import Package
class mfaddoutsidefile(Package):
"""
Add a file for which you have a MODFLOW input file
"""
def __init__(self, model, name, extension, unitnumber):
# call base package constructor
super().__init__(
model, extension, name, unitnumber, allowDuplicates=True
)
self.parent.add_package(self)
def __repr__(self):
return "Outside Package class"
def write_file(self):
pass
| {
"content_hash": "718f4af31df457cc998487627eedaf8e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 24.2,
"alnum_prop": 0.6053719008264463,
"repo_name": "jentjr/flopy",
"id": "b9935cd24f05502137459c55d8c7452f0362609d",
"size": "484",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flopy/modflow/mfaddoutsidefile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "832"
},
{
"name": "CSS",
"bytes": "321"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6353118"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
} |
import sys
from PIL import Image
files = sys.argv[1:len(sys.argv)]
print("Frame Count: {}".format(len(files)))
data = []
for f in files:
im = Image.open(f) # Can be many different formats.
pix = im.load()
if im.size != (8, 8):
exit("Image ({}) with incorrect dimensions".format(f))
for y in range(0, 8):
for x in range(0, 8):
for c in range(0, 3):
data.append(pix[x, y][c])
# make file
with open("img.dat", "wb") as out:
# write to file
pixelBytes = bytearray(data)
out.write(pixelBytes)
| {
"content_hash": "89b4b15825f7afff7dc113d29a3d781f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 62,
"avg_line_length": 24.565217391304348,
"alnum_prop": 0.5752212389380531,
"repo_name": "cujomalainey/Smiley-Pack",
"id": "2d5907289222667ec2d078d79c8668f642750724",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3320"
},
{
"name": "Python",
"bytes": "565"
}
],
"symlink_target": ""
} |
"""
__init__.py - Just allows lib to be imported
Created by William Woodall on 2010-10-13.
"""
__author__ = "William Woodall"
__copyright__ = "Copyright (c) 2010 John Harrison, William Woodall"
### Imports ###
# Standard Python Libraries
import sys
import os
try: # try to catch any missing dependancies
# <PKG> for <PURPOSE>
PKGNAME = '<EASY_INSTALL NAME>'
# import <LIBRARY NAME>
del PKGNAME
except ImportError as e: # We are missing something, let them know...
sys.stderr.write("You might not have the "+PKGNAME+" module, try 'easy_install "+PKGNAME+"', else consult google.\n"+e)
### Class ###
### Functions ###
def main():
pass
### IfMain ###
if __name__ == '__main__':
main()
| {
"content_hash": "4a0284f4ce7538eb83d0e85bd53ec06a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 123,
"avg_line_length": 20.166666666666668,
"alnum_prop": 0.6322314049586777,
"repo_name": "wjwwood/open-robotics-platform",
"id": "1633524e6e0720451bd4e9a6ff2b100818230fb9",
"size": "1941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orpd/lib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9147"
},
{
"name": "C++",
"bytes": "2987"
},
{
"name": "CMake",
"bytes": "3331"
},
{
"name": "Makefile",
"bytes": "129"
},
{
"name": "Python",
"bytes": "184121"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
DEFAULT_DATE = datetime(2016, 1, 1)
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
dag1 = DAG(
dag_id='test_start_date_scheduling',
start_date=datetime.utcnow() + timedelta(days=1))
dag1_task1 = DummyOperator(
task_id='dummy',
dag=dag1,
owner='airflow')
dag2 = DAG(
dag_id='test_task_start_date_scheduling',
start_date=DEFAULT_DATE
)
dag2_task1 = DummyOperator(
task_id='dummy1',
dag=dag2,
owner='airflow',
start_date=DEFAULT_DATE + timedelta(days=3)
)
dag2_task2 = DummyOperator(
task_id='dummy2',
dag=dag2,
owner='airflow'
)
| {
"content_hash": "a787af287766522df175ab93dfb96943",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 59,
"avg_line_length": 24.806451612903224,
"alnum_prop": 0.7022106631989596,
"repo_name": "sid88in/incubator-airflow",
"id": "94e6f8216d226dfea77423b9e248598df56fa5b2",
"size": "1581",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/dags/test_scheduler_dags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "280685"
},
{
"name": "JavaScript",
"bytes": "1385622"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "4686260"
},
{
"name": "Shell",
"bytes": "34088"
}
],
"symlink_target": ""
} |
'''
Designs oligos for a pre RNA-seq selection method
'''
### imports ###
import sys
import os
import numpy as np
def readFastaFile(fastaFilePath):
'''
Given a path to a multiline fasta file, reads the file, returning two lists - one containing the sequences, the other containing the headers
inputs: path to a fasta file
outputs: a list of the sequences, a list of the sequence headers
'''
sequences = []
headers = []
with open(fastaFilePath) as f:
data = f.readlines()
sequence = ""
for line in data:
if ">" in line:
header = line.replace(">", "").strip()
headers.append(header)
if not sequence == "":
sequences.append(sequence.upper())
sequence = ""
else:
sequence += line.strip()
sequences.append(sequence.upper())
return sequences, headers
def makeOligos(targetSequences, targetLength, outputPath):
'''
Gives all non-unique k-mers of target length that appear in target sequences
inputs: a list of sequences, length of k-mers, path to write output files
outputs: writes the designed oligos to a Fasta file
'''
seenOligos = set()
for i in range(len(targetSequences)):
currentSeq = targetSequences[i]
for j in range(len(targetSequences[i]) - targetLength):
oligo = currentSeq[ j : j + targetLength ]
seenOligos.add(oligo)
# write fasta files
oligos = list(seenOligos)
for i in range(len(oligos)):
outFile = open(outputPath + "/" + oligos[i] + ".fa", "w")
for j in range(1):
outFile.write(">" + str(j) + "\n")
outFile.write(oligos[i] + "\n")
outFile.close()
if __name__ == "__main__":
targetDirectoryPath = sys.argv[1] # path to a directory containing fasta files giving the sequences we want the oligos to hybridize to
targetLength = int(sys.argv[2]) # desired length of oligos
outputPath = sys.argv[3] # path to write output files
# intialize lists
allTargetSequences = []
allTargetHeaders = []
# read in sequences
print("reading target files")
for targetFile in os.listdir(targetDirectoryPath):
print(targetFile)
targetSequences, targetHeaders = readFastaFile(targetDirectoryPath + "/" + targetFile)
allTargetSequences += targetSequences
allTargetHeaders += targetHeaders
print("writing oligo fasta files")
makeOligos(targetSequences, targetLength, outputPath)
| {
"content_hash": "f8bae895c1ae46ed3ef008876a30fb09",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 141,
"avg_line_length": 29.86842105263158,
"alnum_prop": 0.7083700440528634,
"repo_name": "jenhantao/preRNA-seq_OligoDesigner",
"id": "9b79dccaccf94536120a6fb87c1b727639187e2f",
"size": "2270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makeAllOligos.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9339"
},
{
"name": "Shell",
"bytes": "1817"
}
],
"symlink_target": ""
} |
BOT_NAME = 'jtr_scrapper'
SPIDER_MODULES = ['jtr_scrapper.spiders']
NEWSPIDER_MODULE = 'jtr_scrapper.spiders'
ITEM_PIPELINES = {
'jtr_scrapper.elasticsearchPipeline.ElasticSearchPipeline':100
}
ELASTICSEARCH_HOST = 'elasticsearch'
ELASTICSEARCH_PORT = 9200
ELASTICSEARCH_FLUSH_LIMIT = 1000
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jtr_scrapper (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'jtr_scrapper.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'jtr_scrapper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'jtr_scrapper.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| {
"content_hash": "be5d39d45503d00694a1002e2c4a77b8",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 109,
"avg_line_length": 33.9390243902439,
"alnum_prop": 0.7786561264822134,
"repo_name": "hikhvar/jtr_scrapper",
"id": "30aa0cf50f3ba980edb42b7e6176f7a20b7afbc6",
"size": "3220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jtr_scrapper/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15074"
}
],
"symlink_target": ""
} |
"""This component provides HA sensor support for Ring Door Bell/Chimes."""
from __future__ import annotations
from dataclasses import dataclass
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.icon import icon_for_battery_level
from . import DOMAIN
from .entity import RingEntityMixin
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a sensor for a Ring device."""
devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
entities = [
description.cls(config_entry.entry_id, device, description)
for device_type in ("chimes", "doorbots", "authorized_doorbots", "stickup_cams")
for description in SENSOR_TYPES
if device_type in description.category
for device in devices[device_type]
if not (device_type == "battery" and device.battery_life is None)
]
async_add_entities(entities)
class RingSensor(RingEntityMixin, SensorEntity):
"""A sensor implementation for Ring device."""
entity_description: RingSensorEntityDescription
def __init__(
self,
config_entry_id,
device,
description: RingSensorEntityDescription,
):
"""Initialize a sensor for Ring device."""
super().__init__(config_entry_id, device)
self.entity_description = description
self._extra = None
self._attr_name = f"{device.name} {description.name}"
self._attr_unique_id = f"{device.id}-{description.key}"
@property
def native_value(self):
"""Return the state of the sensor."""
sensor_type = self.entity_description.key
if sensor_type == "volume":
return self._device.volume
if sensor_type == "battery":
return self._device.battery_life
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if (
self.entity_description.key == "battery"
and self._device.battery_life is not None
):
return icon_for_battery_level(
battery_level=self._device.battery_life, charging=False
)
return self.entity_description.icon
class HealthDataRingSensor(RingSensor):
"""Ring sensor that relies on health data."""
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
await self.ring_objects["health_data"].async_track_device(
self._device, self._health_update_callback
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect callbacks."""
await super().async_will_remove_from_hass()
self.ring_objects["health_data"].async_untrack_device(
self._device, self._health_update_callback
)
@callback
def _health_update_callback(self, _health_data):
"""Call update method."""
self.async_write_ha_state()
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# These sensors are data hungry and not useful. Disable by default.
return False
@property
def native_value(self):
"""Return the state of the sensor."""
sensor_type = self.entity_description.key
if sensor_type == "wifi_signal_category":
return self._device.wifi_signal_category
if sensor_type == "wifi_signal_strength":
return self._device.wifi_signal_strength
class HistoryRingSensor(RingSensor):
"""Ring sensor that relies on history data."""
_latest_event = None
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
await self.ring_objects["history_data"].async_track_device(
self._device, self._history_update_callback
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect callbacks."""
await super().async_will_remove_from_hass()
self.ring_objects["history_data"].async_untrack_device(
self._device, self._history_update_callback
)
@callback
def _history_update_callback(self, history_data):
"""Call update method."""
if not history_data:
return
kind = self.entity_description.kind
found = None
if kind is None:
found = history_data[0]
else:
for entry in history_data:
if entry["kind"] == kind:
found = entry
break
if not found:
return
self._latest_event = found
self.async_write_ha_state()
@property
def native_value(self):
"""Return the state of the sensor."""
if self._latest_event is None:
return None
return self._latest_event["created_at"]
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = super().extra_state_attributes
if self._latest_event:
attrs["created_at"] = self._latest_event["created_at"]
attrs["answered"] = self._latest_event["answered"]
attrs["recording_status"] = self._latest_event["recording"]["status"]
attrs["category"] = self._latest_event["kind"]
return attrs
@dataclass
class RingRequiredKeysMixin:
"""Mixin for required keys."""
category: list[str]
cls: type[RingSensor]
@dataclass
class RingSensorEntityDescription(SensorEntityDescription, RingRequiredKeysMixin):
"""Describes Ring sensor entity."""
kind: str | None = None
SENSOR_TYPES: tuple[RingSensorEntityDescription, ...] = (
RingSensorEntityDescription(
key="battery",
name="Battery",
category=["doorbots", "authorized_doorbots", "stickup_cams"],
native_unit_of_measurement=PERCENTAGE,
device_class="battery",
cls=RingSensor,
),
RingSensorEntityDescription(
key="last_activity",
name="Last Activity",
category=["doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:history",
device_class=SensorDeviceClass.TIMESTAMP,
cls=HistoryRingSensor,
),
RingSensorEntityDescription(
key="last_ding",
name="Last Ding",
category=["doorbots", "authorized_doorbots"],
icon="mdi:history",
kind="ding",
device_class=SensorDeviceClass.TIMESTAMP,
cls=HistoryRingSensor,
),
RingSensorEntityDescription(
key="last_motion",
name="Last Motion",
category=["doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:history",
kind="motion",
device_class=SensorDeviceClass.TIMESTAMP,
cls=HistoryRingSensor,
),
RingSensorEntityDescription(
key="volume",
name="Volume",
category=["chimes", "doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:bell-ring",
cls=RingSensor,
),
RingSensorEntityDescription(
key="wifi_signal_category",
name="WiFi Signal Category",
category=["chimes", "doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:wifi",
cls=HealthDataRingSensor,
),
RingSensorEntityDescription(
key="wifi_signal_strength",
name="WiFi Signal Strength",
category=["chimes", "doorbots", "authorized_doorbots", "stickup_cams"],
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
icon="mdi:wifi",
device_class="signal_strength",
cls=HealthDataRingSensor,
),
)
| {
"content_hash": "f4d1c6b606636b1f8e0e445f494f6f44",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 93,
"avg_line_length": 31.153256704980844,
"alnum_prop": 0.6266141925962366,
"repo_name": "w1ll1am23/home-assistant",
"id": "1aaa073064f8189370255bfbb963fc32779d3518",
"size": "8131",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ring/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from . import asciiTable
class table_T_S_I_P_(asciiTable.asciiTable):
pass
| {
"content_hash": "1cb13544657124ce4d50efa5e4ef9fe1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 15.6,
"alnum_prop": 0.7435897435897436,
"repo_name": "googlei18n/TachyFont",
"id": "e34a18c997337619789460bf5c9600a7f2619c0f",
"size": "78",
"binary": false,
"copies": "11",
"ref": "refs/heads/main",
"path": "run_time/src/gae_server/third_party/fonttools/Lib/fontTools/ttLib/tables/T_S_I_P_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "303868"
},
{
"name": "CSS",
"bytes": "313471"
},
{
"name": "HTML",
"bytes": "100691"
},
{
"name": "Java",
"bytes": "24368"
},
{
"name": "JavaScript",
"bytes": "1163591"
},
{
"name": "Python",
"bytes": "169129"
},
{
"name": "Shell",
"bytes": "11939"
}
],
"symlink_target": ""
} |
from pyxb_114.bundles.opengis.citygml.raw.landUse import *
| {
"content_hash": "159593d092f3fcb658e8221148358251",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 58,
"avg_line_length": 59,
"alnum_prop": 0.8135593220338984,
"repo_name": "msherry/PyXB-1.1.4",
"id": "92764eb07290909285c7abfb5ba3ca81d53567d5",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyxb_114/bundles/opengis/citygml/landUse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6307"
},
{
"name": "Python",
"bytes": "1521054"
},
{
"name": "Shell",
"bytes": "23730"
}
],
"symlink_target": ""
} |
'''
Created on Feb 3, 2013
@author: bpurgaso
'''
from twisted.words.protocols import irc
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import threads
from ConfigManager import ConfigManager
from Authenticator import Authenticator
from subprocess import PIPE, STDOUT, Popen
class bot(irc.IRCClient):
"""
irc bots, yay
"""
def _get_nickname(self):
return self.factory.nickname
nickname = property(_get_nickname)
def reloadConfig(self):
self.config = self.configManager.getConfig()
def signedOn(self):
#Initial Setup
self.configManager = self.factory.configManager
self.configManager.registerListener(self)
self.config = self.configManager.getConfig()
self.auth = self.factory.auth
print "Signed on as %s." % (self.nickname)
for i in self.config['channels'].keys():
if self.config['channels'][i]['autojoin']:
irc.IRCClient.join(self, i, self.config['channels'][i]['key'])
def joined(self, channel):
print "Joined %s." % (channel)
def irc_INVITE(self, prefix, params):
""" called by twisted,
if the bot was invited
"""
channel = params[-1].lower().replace('#', '')
if channel not in self.config['channels'].keys():
self.auth.createChannelEntry(channel)
self.join(channel, self.config['channels'][channel]['key'])
def privmsg(self, user, channel, msg):
'''
Called whenever an inbound message arrives
'''
print user, channel, msg
user = user.rsplit('!', 1)[0]
# Check to see if they're sending me a private message
if channel == self.nickname:
channel = user
index = 0
else:
index = 1
# See if the message directed at me
if msg.startswith(self.nickname + ":") or index == 0:
'''
embedded commands go here
'''
command = msg.rsplit()[index].lower()
#REGISTER
if command == 'register':
if self.auth.isUserAuthorized('register', user):
self.msg(channel, self.auth.registerUser(user, 'default'))
else:
self.msg(channel, "You aren't authorized for register.")
#PROMOTE
elif command == 'promote':
if self.auth.isUserAuthorized('promote', user):
try:
target_uname = msg.rsplit()[index + 1].lower()
target_group = msg.rsplit()[index + 2].lower()
if self.auth.getPowerOfUser(user) <=\
self.auth.getPowerOfGroup(target_group):
self.postToIRC((channel, [self.auth.registerUser(\
target_uname, target_group)]))
else:
self.postToIRC((channel, ['%s, your power level'\
' is'\
' insufficient.' % user]))
except:
self.postToIRC((channel, ['Check your formatting and'\
' try again.']))
else:
self.msg(channel, "You aren't authorized for register.")
#WHOAMI
elif command == 'whoami':
if self.auth.isUserAuthorized('whoami', user):
self.postToIRC((channel, [self.auth.whoami(user)]))
else:
self.msg(channel, "You aren't authorized for register.")
#OPME
elif command == 'opme':
if self.auth.isUserAuthorized('opme', user):
self.mode(channel, set, 'o', None, user)
else:
self.msg(channel, "You aren't authorized for opme.")
#AUTOOP
elif command == 'autoop':
if self.auth.isUserAuthorized('autoop', user):
if msg.rsplit()[2].lower() == 'on':
self.postToIRC((channel, self.auth.toggleAutoOp(\
user, channel, True)))
else:
self.postToIRC((channel, self.auth.toggleAutoOp(\
user, channel, False)))
else:
self.msg(channel, "You aren't authorized for autoop.")
#HELP
elif command == 'help':
if self.auth.isUserAuthorized('help', user):
for i in self.auth.getAvailableCommandsForUser(user):
self.msg(user, '%s: %s' %\
(i, self.auth.getHelpForCommand(i)))
self.msg(channel, 'I\'ve sent you a pm.')
else:
self.msg(channel, "You aren't authorized for help.")
#RELOAD
elif command == 'reload':
if self.auth.isUserAuthorized('reload', user):
self.configManager.reload()
self.msg(channel, "Configuration Reloaded")
if not self.auth.sanityCheck(False):
self.msg(channel, "Configuration Sanity is suspect, "\
"rolling back.")
else:
self.msg(channel, "You aren't authorized for reload.")
#KICK
elif command == 'kick':
if self.auth.isUserAuthorized('kick', user):
if self.nickname not in msg.rsplit()[index + 1:]:
for i in msg.rsplit()[index + 1:]:
self.kick(channel, i, 'Later broseph.')
else:
self.msg(channel, "Nope, not happening.")
else:
self.kick(channel, user, 'Sorry bro, nothing personal.')
else:
'''
External script execution goes here
'''
if self.auth.isUserAuthorized(msg.rsplit()[index].lower(),\
user):
#kick off the async call
#channel, command, params
self.invokeCommand(channel,\
command,\
(" ".join(msg.rsplit()[index + 1:])))
else:
self.msg(channel, "You aren't authorized for %s." %\
(command))
else:
'''
filter processing go here
'''
pass
def invokeCommand(self, channel, command, params):
tmp = threads.deferToThread(self.__shellCall, channel, command, params)
tmp.addCallback(self.postToIRC)
def __shellCall(self, channel, command, params):
command = self.sanitize(command)
params = self.sanitize(params)
command = "exec python ./bin/%s.py %s 2> /dev/null" % (command, params)
self.p = Popen(
command,
stderr=STDOUT,
stdout=PIPE,
close_fds=True,
shell=True)
out, err = self.p.communicate() # @UnusedVariable
return (channel, out.splitlines())
def sanitize(self, s):
for i in self.config['sanitize']:
s = s.replace(i, '')
return s
def postToIRC(self, tpl):
for i in tpl[1]:
self.msg(tpl[0], i)
def userJoined(self, user, channel):
channel_dict = channel.replace('#', '')
if self.config['channels'][channel_dict]['enable_autoop'] and\
user in self.config['channels'][channel_dict]['autoop']:
self.mode(channel, set, 'o', None, user)
if self.config['channels'][channel_dict]['enable_greeting']:
self.msg(channel, "%s: %s" % (user,\
self.config['channels'][channel_dict]['greeting']))
def kickedFrom(self, channel, kicker, message):
""" called by twisted,
if the bot was kicked
"""
channel = channel.replace('#', '')
if channel in self.config['channels'].keys() and\
self.config['channels'][channel]['autojoin']:
self.join(channel, self.config['channels'][channel]['key'])
self.msg(kicker, "Why would you do that to me brah?")
class botFactory(protocol.ClientFactory):
"""
Factory for producing "bot"
"""
protocol = bot
def __init__(self, channel, configManager, auth):
self.startChannel = channel
self.configManager = configManager
self.config = self.configManager.getConfig()
self.auth = auth
#required
self.nickname = self.config['nick']
def clientConnectionLost(self, connector, reason):
print "Lost connection (%s), reconnecting." % (reason)
connector.connect()
def clientConnectionFailed(self, connector, reason):
print "Could not connect: %s" % (reason)
class Hydra(object):
'''
The big bad scary bot
'''
def __init__(self):
self.startChannel = '#hydra'
self.configManager = ConfigManager()
self.config = self.configManager.getConfig()
self.configManager.registerListener(self)
self.auth = Authenticator(self.configManager)
n = self.config['network']
p = self.config['port']
b = botFactory(self.startChannel, self.configManager, self.auth)
reactor.connectTCP(n, p, b) # @UndefinedVariable
reactor.run() # @UndefinedVariable
def reloadConfig(self):
self.config = self.configManager.getConfig()
### dummy code below
h = Hydra()
| {
"content_hash": "c181e81ba69ee9bc96a7fbb2d3b6dd3d",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 79,
"avg_line_length": 37.70943396226415,
"alnum_prop": 0.5065545882117483,
"repo_name": "bpurgaso/hydra-ircbot",
"id": "715a4796b8c6931758d4eaebe578d13178d94e13",
"size": "9993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hydra/Hydra.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19406"
}
],
"symlink_target": ""
} |
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# import the queues
from portality.tasks.redis_huey import main_queue
# now import the tasks that will bind to those queues
# these are the ones which bind to the main_queue
from portality.tasks.reporting import scheduled_reports, run_reports
from portality.tasks.journal_in_out_doaj import set_in_doaj
from portality.tasks.sitemap import scheduled_sitemap, generate_sitemap
from portality.tasks.journal_bulk_edit import journal_bulk_edit
from portality.tasks.suggestion_bulk_edit import suggestion_bulk_edit
from portality.tasks.ingestarticles import ingest_articles
from portality.tasks.preservation import preserve
from portality.tasks.journal_csv import scheduled_journal_csv, journal_csv
from portality.tasks.read_news import scheduled_read_news, read_news
from portality.tasks.journal_bulk_delete import journal_bulk_delete
from portality.tasks.article_bulk_delete import article_bulk_delete
from portality.tasks.async_workflow_notifications import async_workflow_notifications
from portality.tasks.check_latest_es_backup import scheduled_check_latest_es_backup, check_latest_es_backup
from portality.tasks.request_es_backup import scheduled_request_es_backup, request_es_backup
| {
"content_hash": "e670c3d7bc591d5b438c44ff180bab91",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 107,
"avg_line_length": 54.75,
"alnum_prop": 0.8378995433789954,
"repo_name": "DOAJ/doaj",
"id": "527ee368cf7743d9678738df0bac4d78a455c877",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/tasks/consumer_main_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
} |
from grid import *
import time
count = int(input("How many times to use item: "))
input("Place mouse over center of inventory item to use and press enter.")
x, y = Mouse.get_position()
for i in range(count):
Mouse.click(x, y)
time.sleep(0.100)
Mouse.click(x + 50, y + 15)
time.sleep(0.100)
| {
"content_hash": "d6ad930c4ca343eae1fed5195a0fdcf1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 20.733333333333334,
"alnum_prop": 0.6559485530546624,
"repo_name": "jshumaker/LoA",
"id": "dcaf90ff863601d57cf942ee53ddf1dc45eff308",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "use_inventory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122020"
}
],
"symlink_target": ""
} |
'''Test that mouse cursor can be set to a platform-dependent image.
Expected behaviour:
One window will be opened. Press the left and right arrow keys to cycle
through the system mouse cursors. The current cursor selected will
be printed to the terminal.
Note that not all cursors are unique on each platform; for example,
if a platform doesn't define a cursor for a given name, a suitable
replacement (e.g., a plain arrow) will be used instead.
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_VISIBLE.py 703 2007-02-28 14:18:00Z Alex.Holkner $'
import unittest
from pyglet import window
from pyglet.window import key
from pyglet.gl import *
class WINDOW_SET_MOUSE_PLATFORM_CURSOR(unittest.TestCase):
i = 0
def on_key_press(self, symbol, modifiers):
names = [
self.w.CURSOR_DEFAULT,
self.w.CURSOR_CROSSHAIR,
self.w.CURSOR_HAND,
self.w.CURSOR_HELP,
self.w.CURSOR_NO,
self.w.CURSOR_SIZE,
self.w.CURSOR_SIZE_UP,
self.w.CURSOR_SIZE_UP_RIGHT,
self.w.CURSOR_SIZE_RIGHT,
self.w.CURSOR_SIZE_DOWN_RIGHT,
self.w.CURSOR_SIZE_DOWN,
self.w.CURSOR_SIZE_DOWN_LEFT,
self.w.CURSOR_SIZE_LEFT,
self.w.CURSOR_SIZE_UP_LEFT,
self.w.CURSOR_SIZE_UP_DOWN,
self.w.CURSOR_SIZE_LEFT_RIGHT,
self.w.CURSOR_TEXT,
self.w.CURSOR_WAIT,
self.w.CURSOR_WAIT_ARROW,
]
if symbol == key.ESCAPE:
self.w.on_close()
if symbol == key.RIGHT:
self.i = (self.i + 1) % len(names)
elif symbol == key.LEFT:
self.i = (self.i - 1) % len(names)
cursor = self.w.get_system_mouse_cursor(names[self.i])
self.w.set_mouse_cursor(cursor)
print 'Set cursor to "%s"' % names[self.i]
return True
def test_set_visible(self):
self.width, self.height = 200, 200
self.w = w = window.Window(self.width, self.height)
w.push_handlers(self)
while not w.has_exit:
glClear(GL_COLOR_BUFFER_BIT)
w.flip()
w.dispatch_events()
w.close()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "b963306293bc33335f6c357f2f754b39",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 88,
"avg_line_length": 33.225352112676056,
"alnum_prop": 0.5875370919881305,
"repo_name": "oktayacikalin/pyglet",
"id": "aed027c43a7227c5530bd0135b263c6bc281cf1f",
"size": "2382",
"binary": false,
"copies": "3",
"ref": "refs/heads/pyglet-1.2-maintenance",
"path": "tests/window/WINDOW_SET_MOUSE_SYSTEM_CURSOR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "6504448"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
} |
import pmxbot
from pmxbot.core import regexp, contains, command
from pmxbot.core import ContainsHandler
import httplib2
import json
import re
import logging
import six
import random
log = logging.getLogger(__name__)
class RegexpFindHandler(ContainsHandler):
class_priority = 4
def __init__(self, *args, **kwargs):
super(RegexpFindHandler, self).__init__(*args, **kwargs)
if isinstance(self.pattern, six.string_types):
self.pattern = re.compile(self.pattern, re.IGNORECASE)
def match(self, message, channel):
return self.pattern.findall(message)
def process(self, message):
return self.pattern.findall(message)
def regexpfind(name, regexp, doc=None, **kwargs):
return RegexpFindHandler(
name=name,
doc=doc,
pattern=regexp,
**kwargs
).decorate
def getticket(ticketnum):
h = httplib2.Http(".cache")
try:
resp, content = h.request("%s/issues/%s.json" %
(pmxbot.config.redmine_url, ticketnum),
"GET",
headers={'X-Redmine-API-Key':
pmxbot.config.redmine_apikey})
except:
log.exception("Error retrieving ticket %s", ticketnum)
if resp['status'] == '404':
return
if resp['status'] == '403':
return
try:
tjson = json.loads(content.decode('utf-8'))
except ValueError:
return ("Received invalid json from %s/issues/%s.json" %
(pmxbot.config.redmine_url, tnumber))
if 'assigned_to' not in tjson['issue']:
tjson['issue']['assigned_to'] = {'name': 'nobody'}
return tjson
def getprojects():
h = httplib2.Http(".cache")
try:
resp, content = h.request("%s/projects.json" %
(pmxbot.config.redmine_url), "GET",
headers={'X-Redmine-API-Key':
pmxbot.config.redmine_apikey})
except:
log.exception("Error retrieving projects")
if resp['status'] == '404':
return
if resp['status'] == '403':
return
try:
pjson = json.loads(content.decode('utf-8'))
except ValueError:
return ("Received invalid json from %s/projects.json" %
(pmxbot.config.redmine_url))
return pjson
@command("build")
def getlatestbuild(client, event, channel, nick, rest):
if (not pmxbot.config.redmine_apikey or not
pmxbot.config.redmine_url or not
pmxbot.config.redmine_chan_proj_mapping or not
pmxbot.config.redmine_default_project):
return
h = httplib2.Http(".cache")
try:
resp, content = h.request("%s/projects/%s/versions.json" %
(pmxbot.config.redmine_url,
pmxbot.config.redmine_default_project),
"GET",
headers={'X-Redmine-API-Key':
pmxbot.config.redmine_apikey})
except:
log.exception("Error retrieving builds")
if resp['status'] == '404':
return
if resp['status'] == '403':
return
try:
latest_build = json.loads(content.decode('utf-8'))['versions'][-2]['name']
except ValueError:
yield ("Received invalid json from %s/projects/%s/versions.json" %
(pmxbot.config.redmine_url, pmxbot.config.redmine_default_project))
yield ("The latest version is: %s" % (latest_build))
def projectChanWhitelist(ticketNum, channel):
pjson = getprojects()
pIds = {p['id']: p['identifier'] for p in pjson['projects']}
ticket = getticket(ticketNum)
try:
ticketId = ticket['issue']['project']['id']
except TypeError:
return
try:
if pIds[ticketId] in pmxbot.config.redmine_chan_proj_mapping[channel]:
return ticket
except:
pass
return
@regexpfind("redmine", r"#(\d+)")
def redmine(client, event, channel, nick, tickets):
if (not pmxbot.config.redmine_apikey or not
pmxbot.config.redmine_url or not
pmxbot.config.redmine_chan_proj_mapping):
return
ticklist = []
for ticketnum in tickets:
ticket = projectChanWhitelist(ticketnum, channel)
if ticket is not None:
ticklist.append(ticket)
for tick in ticklist:
if tick is not None:
yield ("%s: %sissues/%s" %
(nick, pmxbot.config.redmine_url, tick['issue']['id']))
@command("bug")
def redmine_bug(client, event, channel, nick, rest):
if (not pmxbot.config.redmine_apikey or not
pmxbot.config.redmine_url or not
pmxbot.config.redmine_chan_proj_mapping):
return
p = re.compile('(\d+).*')
ticket = p.match(rest).group(1)
if not ticket.isdigit():
return
tick = projectChanWhitelist(ticket, channel)
if tick is not None:
yield ("%s: %s is %sissues/%s \"%s - %s: %s\". Its status is %s and "
"is assigned to %s" %
(nick, tick['issue']['id'], pmxbot.config.redmine_url,
tick['issue']['id'], tick['issue']['project']['name'],
tick['issue']['tracker']['name'], tick['issue']['subject'],
tick['issue']['status']['name'],
tick['issue']['assigned_to']['name']))
| {
"content_hash": "439166f2de39948ddd9816400530a279",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 83,
"avg_line_length": 33.41463414634146,
"alnum_prop": 0.5598540145985401,
"repo_name": "cryptk/pmxbot_redmine",
"id": "c97ebfd24bac12e6c0be826c8bc6bd674fb8982d",
"size": "5504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmxbot_redmine/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6374"
}
],
"symlink_target": ""
} |
import pytest
from .common import * # NOQA
from rancher import ApiError
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
RBAC_ROLES = [CLUSTER_OWNER, PROJECT_MEMBER, PROJECT_OWNER,
PROJECT_READ_ONLY, CLUSTER_MEMBER]
WORKLOAD_TYPES = ["daemonSet", "statefulSet", "cronJob", "job"]
if_check_lb = os.environ.get('RANCHER_CHECK_FOR_LB', "False")
if_check_lb = pytest.mark.skipif(
if_check_lb != "True",
reason='Lb test case skipped')
ENABLE_HOST_NODE_PORT_TESTS = ast.literal_eval(
os.environ.get('RANCHER_ENABLE_HOST_NODE_PORT_TESTS', "True"))
skip_host_node_port = pytest.mark.skipif(
not ENABLE_HOST_NODE_PORT_TESTS,
reason='Tests Skipped for AKS,GKE,EKS Clusters')
def test_wl_sidekick():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("sidekick")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
side_con = {"name": "test2",
"image": TEST_IMAGE_NGINX,
"stdin": True,
"tty": True}
con.append(side_con)
workload = p_client.update(workload,
containers=con)
time.sleep(90)
validate_workload_with_sidekicks(
p_client, workload, "deployment", ns.name)
def test_wl_deployment():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
def test_wl_statefulset():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
statefulSetConfig={}
)
validate_workload(p_client, workload, "statefulSet", ns.name)
def test_wl_daemonset():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
schedulable_node_count = len(get_schedulable_nodes(cluster))
validate_workload(p_client, workload, "daemonSet",
ns.name, schedulable_node_count)
def test_wl_cronjob():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
cronJobConfig={
"concurrencyPolicy": "Allow",
"failedJobsHistoryLimit": 10,
"schedule": "*/1 * * * *",
"successfulJobsHistoryLimit": 10})
validate_workload(p_client, workload, "cronJob", ns.name)
def test_wl_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
revisions = workload.revisions()
assert len(revisions) == 1
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE:
firstrevision = revision.id
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
p_client.update(workload, containers=con)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_NGINX, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_NGINX, ns)
revisions = workload.revisions()
assert len(revisions) == 2
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE_NGINX:
secondrevision = revision.id
con = [{"name": "test1",
"image": TEST_IMAGE_OS_BASE,
"tty": True,
"stdin": True}]
p_client.update(workload, containers=con)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_OS_BASE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_OS_BASE, ns)
revisions = workload.revisions()
assert len(revisions) == 3
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE_OS_BASE:
thirdrevision = revision.id
p_client.action(workload, "rollback", replicaSetId=firstrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE, ns)
p_client.action(workload, "rollback", replicaSetId=secondrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_NGINX, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_NGINX, ns)
p_client.action(workload, "rollback", replicaSetId=thirdrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_OS_BASE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_OS_BASE, ns)
def test_wl_pod_scale_up():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
workload = wait_for_wl_to_active(p_client, workload)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns.name
allpods = execute_kubectl_cmd(get_pods)
wait_for_pods_in_workload(p_client, workload, 1)
p_client.update(workload, scale=2, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_pods_are_running_by_id(allpods, workload, ns.name)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
allpods = execute_kubectl_cmd(get_pods)
wait_for_pods_in_workload(p_client, workload, 2)
p_client.update(workload, scale=3, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 3)
validate_pods_are_running_by_id(allpods, workload, ns.name)
def test_wl_pod_scale_down():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=3)
wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
p_client.update(workload, scale=2, containers=con)
wait_for_pods_in_workload(p_client, workload, 2)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns.name
allpods = execute_kubectl_cmd(get_pods)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_pods_are_running_by_id(allpods, workload, ns.name)
p_client.update(workload, scale=1, containers=con)
wait_for_pods_in_workload(p_client, workload, 1)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
allpods = execute_kubectl_cmd(get_pods)
validate_workload(p_client, workload, "deployment", ns.name)
validate_pods_are_running_by_id(allpods, workload, ns.name)
def test_wl_pause_orchestration():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
p_client.action(workload, "pause")
validate_workload_paused(p_client, workload, True)
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
p_client.update(workload, containers=con)
validate_pod_images(TEST_IMAGE, workload, ns.name)
p_client.action(workload, "resume")
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload_paused(p_client, workload, False)
validate_pod_images(TEST_IMAGE_NGINX, workload, ns.name)
# Windows could not support host port for now.
@skip_test_windows_os
@skip_host_node_port
def test_wl_with_hostPort():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9999
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "HostPort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
@skip_host_node_port
def test_wl_with_nodePort():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30456
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
def test_wl_with_clusterIp():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30458
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "ClusterIP",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
# Get cluster Ip
sd_records = p_client.list_dns_record(name=name).data
assert len(sd_records) == 1
cluster_ip = sd_records[0].clusterIp
# Deploy test pods used for clusteIp resolution check
wlname = random_test_name("testclusterip-client")
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload_for_test = p_client.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload_for_test)
test_pods = wait_for_pods_in_workload(p_client, workload_for_test, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
@if_check_lb
def test_wl_with_lb():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9001
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "LoadBalancer",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_lb(p_client, workload, source_port)
def test_wl_with_clusterIp_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30459
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "ClusterIP",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test-cluster-ip",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("cluster-ip-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
sd_records = p_client.list_dns_record(name=name).data
assert len(sd_records) == 1
cluster_ip = sd_records[0].clusterIp
# get test pods
wlname = random_test_name("testclusterip-client")
wl_con = [{"name": "test1", "image": TEST_IMAGE}]
workload_for_test = p_client.create_workload(name=wlname,
containers=wl_con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload_for_test)
test_pods = wait_for_pods_in_workload(p_client, workload_for_test, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# scale up
p_client.update(workload, scale=3, caontainers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# upgrade
con = [{"name": "test-cluster-ip-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
@skip_host_node_port
def test_wl_with_nodePort_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30457
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("test-node-port-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# scale up
p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# upgrade
con = [{"name": "test-node-port-scale-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# Windows could not support host port for now.
@skip_test_windows_os
@skip_host_node_port
def test_wl_with_hostPort_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 8888
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "HostPort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test-host-port-upgrade",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("hostport-scale")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# scale up
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# scale down
p_client.update(workload, scale=1, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# From my observation, it is necessary to wait until
# the number of pod equals to the expected number,
# since the workload's state is 'active' but pods
# are not ready yet especially after scaling down and upgrading.
# upgrade
con = [{"name": "test-host-port-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
@if_check_lb
def test_wl_with_lb_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9001
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "LoadBalancer",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("lb-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_lb(p_client, workload, source_port)
# scale up
p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_lb(p_client, workload, source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_lb(p_client, workload, source_port)
# upgrade
con = [{"name": "test-load-balance-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_lb(p_client, workload, source_port)
# --------------------- rbac tests for cluster owner -----------------------
@if_test_rbac
def test_rbac_cluster_owner_wl_create(remove_resource):
# cluster owner can create project and deploy workload in it
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_cluster_owner_wl_create_2(remove_resource):
# cluster owner can deploy workload in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p2 = rbac_get_unshared_project()
p_client2 = get_project_client_for_token(p2, user_token)
ns2 = rbac_get_unshared_ns()
name = random_test_name("default")
con = [{"name": "test1",
"image": TEST_IMAGE}]
wl = p_client2.create_workload(name=name, containers=con,
namespaceId=ns2.id)
validate_workload(p_client2, wl, "deployment", ns2.name)
remove_resource(wl)
@if_test_rbac
def test_rbac_cluster_owner_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster owner can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1",
"image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_cluster_owner_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster owner can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for cluster member -----------------------
@if_test_rbac
def test_rbac_cluster_member_wl_create(remove_resource):
# cluster member can create project and deploy workload in it
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_cluster_member_wl_create_2():
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
# cluster member can NOT deploy workload in the project he can NOT access
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_cluster_member_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster member can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_cluster_member_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster member can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project member -----------------------
@if_test_rbac
def test_rbac_project_member_wl_create(remove_resource):
# project member can deploy workload in his project
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_project_member_wl_create_2():
# project member can NOT deploy workload in the project he can NOT access
user_token = rbac_get_user_token_by_role(PROJECT_MEMBER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_member_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project member can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_project_member_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project member can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project owner -----------------------
@if_test_rbac
def test_rbac_project_owner_wl_create(remove_resource):
# project owner can deploy workload in his project
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_project_owner_wl_create_2():
# project owner can NOT deploy workload in the project he can NOT access
user_token = rbac_get_user_token_by_role(PROJECT_OWNER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_owner_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project owner can edit workload in his project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_project_owner_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project owner can delete workload in his project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project read-only --------------------
@if_test_rbac
def test_rbac_project_read_only_wl_create():
# project read-only can NOT deploy workloads in the project
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
with pytest.raises(ApiError) as e:
p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_read_only_wl_edit(remove_resource):
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(project, user_token)
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
ns = rbac_get_namespace()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = cluster_owner_p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
# project read-only can NOT edit existing workload
with pytest.raises(ApiError) as e:
p_client.update(workload, scale=2)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
def test_rbac_project_read_only_wl_list():
# project read-only can NOT see workloads in the project he has no access
p2 = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(p2, user_token)
workloads = p_client.list_workload().data
assert len(workloads) == 0
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(
USER_TOKEN, cluster, random_test_name("testworkload"))
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
def setup_project_by_role(role, remove_resource):
""" set up a project for a specific role used for rbac testing
- for cluster owner or cluster member:
it creates a project and namespace, then deploys a workload
- for project owner or project member:
it deploys a workload to the existing project and namespace
"""
user_token = rbac_get_user_token_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role in [CLUSTER_OWNER, CLUSTER_MEMBER]:
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("test-rbac"))
p_client = get_project_client_for_token(project, user_token)
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
remove_resource(project)
remove_resource(ns)
remove_resource(workload)
return p_client, project, ns, workload
elif role in [PROJECT_OWNER, PROJECT_MEMBER]:
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
remove_resource(workload)
return p_client, project, ns, workload
else:
return None, None, None, None
# --------------------- rbac tests by workload types -----------------------
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametarize_create(role, config, remove_resource):
p_client, project, ns = setup_wl_project_by_role(role)
cluster = namespace["cluster"]
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
return None
else:
with pytest.raises(ApiError) as e:
workload = create_workload_by_type(p_client, name, con, ns, config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_create_negative(role, remove_resource, config):
if role == CLUSTER_OWNER:
# cluster owner can deploy workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p2 = rbac_get_unshared_project()
p_client2 = get_project_client_for_token(p2, user_token)
ns2 = rbac_get_unshared_ns()
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
wl = create_workload_by_type(p_client2, name, con, ns2, config)
wait_for_wl_to_active(p_client2, wl)
remove_resource(wl)
else:
# roles cannot deploy workloads in projects they cannot access
user_token = rbac_get_user_token_by_role(role)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
workload = create_workload_by_type(new_p_client, name, con, ns2, config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_list(role, remove_resource, config):
if role == CLUSTER_MEMBER:
p_client, project, ns = setup_wl_project_by_role(role)
else:
p_client, project, ns = setup_wl_project_by_role(CLUSTER_OWNER)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
# switch to rbac role
user_token = rbac_get_user_token_by_role(role)
p_client_rbac = get_project_client_for_token(project, user_token)
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 1
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_list_negative(role, remove_resource, config):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(unshared_project, user_token)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
wait_for_wl_to_active(p_client, workload)
# switch to rbac role
user_token = rbac_get_user_token_by_role(role)
p_client_rbac = get_project_client_for_token(unshared_project, user_token)
if role != CLUSTER_OWNER:
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 0
else:
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 1
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_update(role, remove_resource, config):
# workloads of type job cannot be edited
if config == "job":
return
p_client, project, ns = setup_wl_project_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
con = [{"name": "test1", "image": os.environ.get('RANCHER_TEST_IMAGE',
"nginx")}]
p_client.update(workload, containers=con)
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
else:
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
with pytest.raises(ApiError) as e:
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
con = [{"name": "test1", "image": os.environ.get('RANCHER_TEST_IMAGE',
"nginx")}]
p_client.update(workload, containers=con)
wait_for_pods_in_workload(p_client, workload)
validate_workload(p_client, workload, config, ns.name)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_update_negative(role, remove_resource, config):
# workloads of type job cannot be edited
if config == "job":
return
if role == CLUSTER_OWNER:
# cluster owner can edit workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client, project, ns = setup_wl_project_by_role(role)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
remove_resource(workload)
else:
project2 = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
# roles cannot edit workloads in projects they cannot access
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(
project2, cluster_owner_token)
ns = rbac_get_unshared_ns()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(cluster_owner_p_client,
name, con, ns, config)
with pytest.raises(ApiError) as e:
p_client = get_project_client_for_token(project2, user_token)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_delete(role, remove_resource, config):
p_client, project, ns = setup_wl_project_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
remove_resource(workload)
else:
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
with pytest.raises(ApiError) as e:
p_client.delete(workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_delete_negative(role, remove_resource, config):
if role == CLUSTER_OWNER:
# cluster owner can delete workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_unshared_project()
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
workload = create_workload_by_type(p_client, name, con, ns, config)
p_client.delete(workload)
else:
project = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
# roles cannot delete workloads in projects they cannot access
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(
project, cluster_owner_token)
ns = rbac_get_unshared_ns()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(cluster_owner_p_client,
name, con, ns, config)
p_client = get_project_client_for_token(project, user_token)
with pytest.raises(ApiError) as e:
p_client.delete(workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
def setup_wl_project_by_role(role):
if role == CLUSTER_MEMBER:
user_token = rbac_get_user_token_by_role(role)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("test-rbac"))
p_client = get_project_client_for_token(project, user_token)
return p_client, project, ns
else:
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
return p_client, project, ns
def create_workload_by_type(client, name, con, ns, config):
if config == "daemonSet":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
elif config == "statefulSet":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
statefulSetConfig={})
elif config == "cronJob":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
cronJobConfig={
"concurrencyPolicy": "Allow",
"failedJobsHistoryLimit": 10,
"schedule": "*/1 * * * *",
"successfulJobsHistoryLimit": 10})
elif config == "job":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
jobConfig={}) | {
"content_hash": "929641b8cca77ce6bc2c6c1d2a09645c",
"timestamp": "",
"source": "github",
"line_count": 1143,
"max_line_length": 84,
"avg_line_length": 42.332458442694666,
"alnum_prop": 0.5931261108585127,
"repo_name": "rancherio/rancher",
"id": "a332fc211b940aa87ea3bb81987d4fc5920a7db4",
"size": "48386",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/validation/tests/v3_api/test_workload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6795"
},
{
"name": "Shell",
"bytes": "25328"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class SharedStruct:
"""
Attributes:
- key
- value
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'key', None, None, ), # 1
(2, TType.STRING, 'value', None, None, ), # 2
)
def __init__(self, key=None, value=None,):
self.key = key
self.value = value
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.key = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SharedStruct')
if self.key is not None:
oprot.writeFieldBegin('key', TType.I32, 1)
oprot.writeI32(self.key)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 2)
oprot.writeString(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| {
"content_hash": "a326d8085d9926ec4a127e64a374e901",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 188,
"avg_line_length": 28.902439024390244,
"alnum_prop": 0.6303797468354431,
"repo_name": "radical-cybertools/aimes.swiftrp",
"id": "92dca6d9d31e746cb3f8e5a5bd7b9b074d083618",
"size": "2511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thrift_tests/gen-py/shared/ttypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59473"
},
{
"name": "Shell",
"bytes": "3448"
},
{
"name": "Thrift",
"bytes": "1110"
}
],
"symlink_target": ""
} |
size = int(input('Insert size of array:')) # size of random array
array = []
for i in range(size):
n=int(input('Insert Number:'))
array.append(n)
anum = int(input('Insert Number to search:')) # number to search for
# array = random.sample(list(range(1, 20)), size) # get some random numbers
array = sorted(array) # sorted() returns a new list
#array.sort() # sort() sorts in-place
print(anum, array) # show us what you've got
# Search for number in array
def binary_search(number, array, lo, hi):
if hi < lo: return -1 # no more numbers
mid = (lo + hi) // 2 # midpoint in array
if number == array[mid]:
return mid # number found here
elif number < array[mid]:
return binary_search(number, array, lo, mid - 1) # try left of here
else:
return binary_search(number, array, mid + 1, hi) # try above here
def my_search(anum, array): # convenience interface to binary_search()
return binary_search(anum, array, 0, len(array) - 1)
pos = my_search(anum, array)
if pos < 0:
print("not found")
else:
print("found at position", pos)
| {
"content_hash": "684aa8b611fd3e00c9f0ae9d66105fbd",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 86,
"avg_line_length": 37,
"alnum_prop": 0.5996621621621622,
"repo_name": "WebClub-NITK/Hacktoberfest-2k17",
"id": "ae089ed1aac317eb4523b3e2b91d6938f35d6db2",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Binary_Search/GENU05.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "41"
},
{
"name": "C",
"bytes": "111323"
},
{
"name": "C#",
"bytes": "845"
},
{
"name": "C++",
"bytes": "25563"
},
{
"name": "CSS",
"bytes": "1069"
},
{
"name": "Go",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "32484"
},
{
"name": "Java",
"bytes": "20074"
},
{
"name": "JavaScript",
"bytes": "2713"
},
{
"name": "Lua",
"bytes": "394"
},
{
"name": "PHP",
"bytes": "1042"
},
{
"name": "Pascal",
"bytes": "235"
},
{
"name": "Perl",
"bytes": "579"
},
{
"name": "Python",
"bytes": "32114"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "VHDL",
"bytes": "1542"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.