code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M) |
---|---|---|---|---|---|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import io
import os
import subprocess
from rally.common import logging
from rally.utils import encodeutils
LOG = logging.getLogger(__name__)
def check_output(*args, **kwargs):
"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The difference between check_output from subprocess package and this
function:
* Additional arguments:
- "msg_on_err" argument. A message that is logged in case of error;
  reduces the number of try...except blocks at call sites.
- "debug_output" argument (defaults to True). Whether to print the
  output to LOG.debug.
* stderr is hardcoded to stdout
* In case of error, prints failed command and output to LOG.error
* Prints output to LOG.debug
"""
msg_on_err = kwargs.pop("msg_on_err", None)
debug_output = kwargs.pop("debug_output", True)
kwargs["stderr"] = subprocess.STDOUT
try:
output = subprocess.check_output(*args, **kwargs)
except subprocess.CalledProcessError as exc:
if msg_on_err:
LOG.error(msg_on_err)
LOG.error("Failed cmd: '%s'" % exc.cmd)
LOG.error("Error output: '%s'" % encodeutils.safe_decode(exc.output))
raise
output = encodeutils.safe_decode(output)
if output and debug_output:
LOG.debug("Subprocess output: '%s'" % output)
return output
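# A minimal, hypothetical usage sketch (not part of the original module): the
# command and message below are invented; it only illustrates the extra
# "msg_on_err"/"debug_output" keyword arguments described in the docstring.
def _example_check_output_usage():
    return check_output(["echo", "hello"],
                        msg_on_err="echo failed unexpectedly",
                        debug_output=False)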
def create_dir(dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
return dir_path
def extend_configfile(extra_options, conf_path):
conf_object = configparser.ConfigParser()
conf_object.optionxform = str
conf_object.read(conf_path)
conf_object = add_extra_options(extra_options, conf_object)
with open(conf_path, "w") as configfile:
conf_object.write(configfile)
raw_conf = io.StringIO()
conf_object.write(raw_conf)
return raw_conf.getvalue()
def add_extra_options(extra_options, conf_object):
conf_object.optionxform = str
for section in extra_options:
if section not in (conf_object.sections() + ["DEFAULT"]):
conf_object.add_section(section)
for option, value in extra_options[section].items():
conf_object.set(section, option, value)
return conf_object
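# A hypothetical sketch (not part of the original module) showing the expected
# shape of "extra_options": a mapping of {section: {option: value}}. The
# section and option names below are invented for illustration only.
def _example_add_extra_options_usage():
    conf = configparser.ConfigParser()
    conf = add_extra_options({"DEFAULT": {"debug": "True"},
                              "example_section": {"example_option": "1"}},
                             conf)
    return conf.get("example_section", "example_option")  # -> "1"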
| openstack/rally | rally/verification/utils.py | Python | apache-2.0 | 3,055 |
import pytest
import gen.template
from gen.template import (For, Replacement, Switch, Tokenizer, UnsetParameter,
parse_str)
just_text = "foo"
more_complex_text = "foo {"
def get_tokens(str):
return Tokenizer(str).tokens
def test_lex():
assert(get_tokens("foo") == [("blob", "foo"), ("eof", None)])
assert(get_tokens("{") == [('blob', '{'), ('eof', None)])
assert(get_tokens("{#") == [('blob', '{'), ('blob', '#'), ('eof', None)])
assert(get_tokens("{ foo ") == [
('blob', '{'), ('blob', ' foo '), ('eof', None)])
assert(get_tokens("{ foo {{{{ {{{{{ ") == [('blob', '{'), ('blob', ' foo '), (
'blob', '{{'), ('blob', ' '), ('blob', '{{'), ('blob', '{'), ('blob', ' '), ('eof', None)])
assert(get_tokens("{{ test }}") == [
('replacement', ('test', None)), ('eof', None)])
assert(get_tokens("{{ test | foo }}") == [
('replacement', ('test', 'foo')), ('eof', None)])
assert(get_tokens(" {{ test }}") == [
('blob', ' '), ('replacement', ('test', None)), ('eof', None)])
assert(get_tokens("{{ test }}}}") == [
('replacement', ('test', None)), ('blob', '}}'), ('eof', None)])
assert(get_tokens('{% switch foo %}{% case "as\\"df" %}foobar{% endswitch %}}}') == [
('switch', 'foo'),
('case', 'as"df'),
('blob', 'foobar'),
('endswitch', None),
('blob', '}}'),
('eof', None)])
assert(get_tokens('{% switch foo %} \n \r {% case "as\\"df" %}foobar{% endswitch %}}}') == [
('switch', 'foo'),
('blob', ' \n \r '),
('case', 'as"df'),
('blob', 'foobar'),
('endswitch', None),
('blob', '}}'),
('eof', None)])
assert(get_tokens("a{% switch foo %}{% case \"test\" %}{{ a | baz }}b{{ a | bar }}{% endswitch %}c{{ c | bar }}{{ a | foo }}") == [ # noqa
('blob', 'a'),
('switch', 'foo'),
('case', 'test'),
('replacement', ('a', 'baz')),
('blob', 'b'),
('replacement', ('a', 'bar')),
('endswitch', None),
('blob', 'c'),
('replacement', ('c', 'bar')),
('replacement', ('a', 'foo')),
('eof', None)
])
assert(get_tokens("{% for foo in bar %}{{ foo }}{% endfor %}") == [
('for', ('foo', 'bar')),
('replacement', ('foo', None)),
('endfor', None),
('eof', None)])
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test |}}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test| }}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test | }}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test }}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{test}}")
with pytest.raises(gen.template.SyntaxError):
get_tokens("{{ test}}")
def test_parse():
assert(parse_str("a").ast == ["a"])
assert(parse_str("{{ a }}").ast == [Replacement(("a", None))])
assert(parse_str("a {{ a | foo }}{{ b }} c {{ d | bar }}").ast == [
"a ",
Replacement(("a", 'foo')),
Replacement(("b", None)),
" c ",
Replacement(("d", 'bar'))
])
assert(parse_str('{% switch foo %}{% case "as\\"df" %}foobar{% endswitch %}}}').ast ==
[Switch("foo", {'as"df': ["foobar"]}), '}}'])
assert(parse_str('{{ a }}b{{ c }}{% switch foo %} \n {% case "as\\"df" %}foobar{% endswitch %}}}').ast == [
Replacement(("a", None)),
"b",
Replacement(("c", None)),
Switch("foo", {'as"df': ["foobar"]}),
"}}"
])
# TODO(cmaloney): Add parse syntax error tests
assert parse_str("{% for foo in bar %}{{ foo }}{% endfor %}").ast == [For("foo", "bar", [Replacement('foo')])]
def test_get_variables():
assert(parse_str("a").get_scoped_arguments() ==
{'variables': set(), 'sub_scopes': dict()})
assert(parse_str("{{ a }}").get_scoped_arguments() ==
{'variables': {"a"}, 'sub_scopes': dict()})
assert(parse_str("{{ a | foo }}").get_scoped_arguments() ==
{'variables': {"a"}, 'sub_scopes': dict()})
assert(parse_str("a{{ a }}b{{ c }}").get_scoped_arguments() ==
{'variables': {"a", "c"}, 'sub_scopes': dict()})
assert(parse_str("a{{ a }}b{{ a }}c{{ c | baz }}").get_scoped_arguments() ==
{'variables': {"a", "c"}, 'sub_scopes': dict()})
assert(parse_str("a{{ a }}b{{ a | bar }}c{{ c }}").get_scoped_arguments() ==
{'variables': {"a", "c"}, 'sub_scopes': dict()})
assert(parse_str("{{ a }}{% switch b %}{% case \"c\" %}{{ d }}{% endswitch %}{{ e }}").get_scoped_arguments() == {
'variables': {'a', 'e'},
'sub_scopes': {
'b': {
'c': {
'variables': {'d'},
'sub_scopes': {}
}
}
}
})
assert (parse_str("{% for foo in bar %}{{ foo }}{{ bar }}{{ baz }}{% endfor %}").get_scoped_arguments() ==
{'variables': {'bar', 'baz'}, 'sub_scopes': dict()})
# TODO(cmaloney): Disallow reusing a for new variable as a general variable.
assert (parse_str("{% for foo in bar %}{{ foo }}{{ bar }}{{ baz }}{% endfor %}{{ foo }}").get_scoped_arguments() ==
{'variables': {'foo', 'bar', 'baz'}, 'sub_scopes': dict()})
def test_get_filters():
assert(parse_str("{{ a }}").get_filters() == set())
assert(parse_str("{{ a | foo }}").get_filters() == {"foo"})
assert(parse_str(
"a{{ a | baz }}b{{ a | bar }}c{{ c | bar }}").get_filters() == {"baz", "bar"})
assert(parse_str("a{% switch foo %}{% case \"test\" %}{{ a | baz }}b{{ a | bar }}{% endswitch %}c{{ c | bar }}{{ a | foo }}").get_filters() == {"foo", "baz", "bar"}) # noqa
assert parse_str("{% for foo in bar %}{{ foo | bang }}{% endfor %}").get_filters() == {'bang'}
def test_render():
assert(parse_str("a").render({}) == "a")
assert(parse_str("{{ a }}a{{ b }}").render({"a": "1", "b": "2"}) == "1a2")
assert(parse_str("{{ a | foo }}a{{ b }}").render(
{"a": "1", "b": "2"},
{'foo': lambda x: x + 'foo'}
) == "1fooa2")
with pytest.raises(UnsetParameter):
parse_str("{{ a }}a{{ b }}").render({"a": "1"})
with pytest.raises(UnsetParameter):
parse_str("{{ a }}").render({"c": "1"})
with pytest.raises(UnsetParameter):
parse_str("{{ a | foo }}").render({"a": "1"})
assert parse_str("{% for a in b %}{{ a }}{% endfor %}").render({"b": ['a', 'test']}) == "atest"
assert (parse_str("{% for a in b %}{{ a }}{% endfor %}else{{ a }}").render({"b": ['b', 't', 'c'], "a": "foo"}) ==
"btcelsefoo")
with pytest.raises(UnsetParameter):
parse_str("{% for a in b %}{{ a }}{% endfor %}else{{ a }}").render({"b": ['b', 't', 'c']})
| movicha/dcos | gen/test_template.py | Python | apache-2.0 | 6,998 |
"""Flask Blueprint adding login functionality to our app. Note that we expect
gluten model and db config to be handled elsewhere
"""
import sys
import traceback
from functools import partial, wraps
from flask import redirect, request, flash, session, abort, g, url_for
from flask.globals import LocalProxy, _lookup_app_object
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
from flask_dance.consumer import (
OAuth2ConsumerBlueprint,
oauth_authorized,
oauth_error
)
from gludb.utils import now_field
from .utils import app_logger
from .models import User
def set_user_session(user_id=None):
if not user_id:
user_id = ''
session['user_id'] = user_id
def get_user():
"""Return current user"""
user_id = session.get('user_id', '')
if not user_id:
return None # Not logged in
return User.find_one(user_id)
def require_login(func):
"""Simple decorator helper for requiring login on functions decorated with
flask route: make sure that it's LAST in the decorator list so that the
flask magic happens (see voice_testing for an example).
Important: we are assuming the blueprint endpoint auth.login exists
"""
@wraps(func)
def wrapper(*args, **kwrds):
try:
user = get_user()
if user:
setattr(g, 'user', user)
return func(*args, **kwrds)
else:
url = url_for('auth.login', redir=request.url)
return redirect(url)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
log = app_logger()
log.warning("Unexpected error: %s", exc_value)
log.error(''.join(traceback.format_exception(
exc_type, exc_value, exc_traceback
)))
return abort(500)
return wrapper
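# Hypothetical usage sketch (not part of the original module); "app" and the
# "/profile" route are invented names. As the docstring notes, @require_login
# must come last in the decorator list, i.e. closest to the function:
#
#   @app.route('/profile')
#   @require_login
#   def profile():
#       return 'Hello %s' % g.user.name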
# Make the google blueprint (taken from their contrib code)
auth = OAuth2ConsumerBlueprint(
"auth",
__name__,
client_id=None, # Handled via app config
client_secret=None, # Handled via app config
scope=["profile", "email"],
base_url="https://www.googleapis.com/",
authorization_url="https://accounts.google.com/o/oauth2/auth",
token_url="https://accounts.google.com/o/oauth2/token",
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
authorization_url_params={},
session_class=None,
backend=None,
)
auth.from_config["client_id"] = "GOOGLE_OAUTH_CLIENT_ID"
auth.from_config["client_secret"] = "GOOGLE_OAUTH_CLIENT_SECRET"
@auth.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.google_oauth = auth.session
google_api = LocalProxy(partial(_lookup_app_object, "google_oauth"))
def login_fail(msg):
flash(msg, category="error")
app_logger().error(msg)
return False
# create/login local user on successful OAuth login
@oauth_authorized.connect
def log_in_event(blueprint, token):
set_user_session() # Clear previous session
if not token:
return login_fail("Failed to log in")
resp = blueprint.session.get("/oauth2/v1/userinfo")
if not resp.ok:
return login_fail("Failed to login user!")
data = resp.json()
email = data.get('email', '')
if not email:
return login_fail("Google failed to supply an email address")
users = User.find_by_index('idx_email', email)
if users:
user = users[0]
else:
user = User(email=email)
# Update the user info and save the session info
user.name = data.get('name', email)
user.photo = data.get('picture', '/static/anonymous_person.png')
user.logins.append(now_field())
user.save()
set_user_session(user.id)
app_logger().info("Logged in user id %s, email %s" % (user.id, user.email))
# notify on OAuth provider error
@oauth_error.connect
def github_error(blueprint, error, error_description=None, error_uri=None):
login_fail("OAuth login failure: [%s] %s (uri=%s)" % (
error, error_description, error_uri
))
@auth.route('/logout')
def logout():
set_user_session()
redir_url = request.args.get("redir", None)
if not redir_url:
redir_url = '/'
return redirect(redir_url)
| memphis-iis/gluten | gluten/auth.py | Python | apache-2.0 | 4,308 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .tables import (
BatchCreateRowsRequest,
BatchCreateRowsResponse,
BatchDeleteRowsRequest,
BatchUpdateRowsRequest,
BatchUpdateRowsResponse,
ColumnDescription,
CreateRowRequest,
DeleteRowRequest,
GetRowRequest,
GetTableRequest,
GetWorkspaceRequest,
LabeledItem,
ListRowsRequest,
ListRowsResponse,
ListTablesRequest,
ListTablesResponse,
ListWorkspacesRequest,
ListWorkspacesResponse,
LookupDetails,
RelationshipDetails,
Row,
Table,
UpdateRowRequest,
Workspace,
View,
)
__all__ = (
"BatchCreateRowsRequest",
"BatchCreateRowsResponse",
"BatchDeleteRowsRequest",
"BatchUpdateRowsRequest",
"BatchUpdateRowsResponse",
"ColumnDescription",
"CreateRowRequest",
"DeleteRowRequest",
"GetRowRequest",
"GetTableRequest",
"GetWorkspaceRequest",
"LabeledItem",
"ListRowsRequest",
"ListRowsResponse",
"ListTablesRequest",
"ListTablesResponse",
"ListWorkspacesRequest",
"ListWorkspacesResponse",
"LookupDetails",
"RelationshipDetails",
"Row",
"Table",
"UpdateRowRequest",
"Workspace",
"View",
)
| googleapis/python-area120-tables | google/area120/tables_v1alpha1/types/__init__.py | Python | apache-2.0 | 1,783 |
import json
import re
import tg
import pkg_resources
import pylons
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura import model as M
from allura.lib import helpers as h
from allura.tests import decorators as td
from alluratest.controller import TestController
class _TestCase(TestController):
def setUp(self):
super(_TestCase, self).setUp()
self.setup_with_tools()
@td.with_git
def setup_with_tools(self):
h.set_context('test', 'src-git', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgegit', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'testgit.git'
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-git', neighborhood='Projects')
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
class TestRootController(_TestCase):
def test_index(self):
resp = self.app.get('/src-git/').follow().follow()
assert 'git://' in resp
def test_index_empty(self):
self.app.get('/git/')
def test_commit_browser(self):
resp = self.app.get('/src-git/commit_browser')
def test_commit_browser_data(self):
resp = self.app.get('/src-git/commit_browser_data')
data = json.loads(resp.body);
assert data['max_row'] == 3
assert data['next_column'] == 1
assert_equal(data['built_tree']['df30427c488aeab84b2352bdf88a3b19223f9d7a'],
{u'url': u'/p/test/src-git/ci/df30427c488aeab84b2352bdf88a3b19223f9d7a/',
u'oid': u'df30427c488aeab84b2352bdf88a3b19223f9d7a',
u'column': 0,
u'parents': [u'6a45885ae7347f1cac5103b0050cc1be6a1496c8'],
u'message': u'Add README', u'row': 1})
def test_log(self):
resp = self.app.get('/src-git/ref/master~/log/')
def test_tags(self):
resp = self.app.get('/src-git/ref/master~/tags/')
def _get_ci(self):
r = self.app.get('/src-git/ref/master:/')
resp = r.follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-git/ci/'):
return tag['href']
return None
def test_commit(self):
ci = self._get_ci()
resp = self.app.get(ci)
assert 'Rick' in resp, resp.showbrowser()
def test_feed(self):
assert 'Add README' in self.app.get('/feed')
def test_tree(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/')
assert len(resp.html.findAll('tr')) == 2, resp.showbrowser()
resp = self.app.get(ci + 'tree/')
assert 'README' in resp, resp.showbrowser()
links = [ a.get('href') for a in resp.html.findAll('a') ]
assert 'README' in links, resp.showbrowser()
assert 'README/' not in links, resp.showbrowser()
def test_tree_extra_params(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/?format=raw')
assert 'README' in resp, resp.showbrowser()
def test_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README')
assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert 'This is readme' in content, content
assert '<span id="l1" class="code_block">' in resp
assert 'var hash = window.location.hash.substring(1);' in resp
def test_invalid_file(self):
ci = self._get_ci()
self.app.get(ci + 'tree/READMEz', status=404)
def test_diff(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README?diff=df30427c488aeab84b2352bdf88a3b19223f9d7a')
assert 'readme' in resp, resp.showbrowser()
assert '+++' in resp, resp.showbrowser()
def test_refresh(self):
notification = M.Notification.query.find(
dict(subject='[test:src-git] 4 new commits to test Git')).first()
domain = '.'.join(reversed(c.app.url[1:-1].split('/'))).replace('_', '-')
common_suffix = tg.config.get('forgemail.domain', '.sourceforge.net')
email = 'noreply@%s%s' % (domain, common_suffix)
assert email in notification['reply_to_address']
def test_file_force_display(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README?force=True')
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert re.search(r'<pre>.*This is readme', content), content
assert '</pre>' in content, content
class TestRestController(_TestCase):
def test_index(self):
self.app.get('/rest/p/test/src-git/', status=200)
def test_commits(self):
self.app.get('/rest/p/test/src-git/commits', status=200)
class TestFork(_TestCase):
def setUp(self):
super(TestFork, self).setUp()
to_project = M.Project.query.get(
shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-git/fork', params=dict(
project_id=str(to_project._id),
mount_point='code',
mount_label='Test forked repository'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
def _follow(self, r, **kw):
if r.status_int == 302:
print r.request.url
while r.status_int == 302:
print ' ==> 302 ==> %s' % r.location
r = r.follow(**kw)
return r
def _upstream_page(self, **kw):
r = self.app.get('/src-git/', **kw)
r = self._follow(r, **kw)
return r
def _fork_page(self, **kw):
r = self.app.get('/p/test2/code/', **kw)
r = self._follow(r, **kw)
return r
def _request_merge(self, **kw):
r = self.app.get('/p/test2/code/request_merge', **kw)
r = self._follow(r, **kw)
r = r.forms[0].submit()
r = self._follow(r, **kw)
mr_num = r.request.url.split('/')[-2]
assert mr_num.isdigit(), mr_num
return r, mr_num
def test_fork_form(self):
r = self.app.get('%sfork/' % c.app.repo.url())
assert '<input type="text" name="mount_point" value="test"/>' in r
assert '<input type="text" name="mount_label" value="test - Git"/>' in r
def test_fork_listed_in_parent(self):
assert 'Forks' in self._upstream_page()
def test_fork_display(self):
r = self._fork_page()
assert 'Clone of' in r
assert 'Test forked repository' in r
def test_fork_links_go_to_fork(self):
r = self._fork_page()
hrefs = ( a.get('href') for a in r.html('a') )
hrefs = ( href for href in hrefs if href and '/ci/' in href )
for href in hrefs:
assert href.startswith('/p/test2/code/'), href
def test_merge_request_visible_to_admin(self):
assert 'Request Merge' in self._fork_page()
def test_merge_request_invisible_to_non_admin(self):
assert 'Request Merge' not in self._fork_page(
extra_environ=dict(username='test-user'))
def test_merge_action_available_to_admin(self):
self.app.get('/p/test2/code/request_merge')
def test_merge_action_unavailable_to_non_admin(self):
self.app.get(
'/p/test2/code/request_merge',
status=403, extra_environ=dict(username='test-user'))
def test_merge_request_detail_view(self):
r, mr_num = self._request_merge()
assert 'would like you to merge' in r, r.showbrowser()
def test_merge_request_list_view(self):
r, mr_num = self._request_merge()
r = self.app.get('/p/test/src-git/merge-requests/')
assert 'href="%s/"' % mr_num in r, r
def test_merge_request_update_status(self):
r, mr_num = self._request_merge()
r = self.app.post('/p/test/src-git/merge-requests/%s/save' % mr_num,
params=dict(status='rejected')).follow()
assert 'Merge Request #%s: (rejected)' % mr_num in r, r
| leotrubach/sourceforge-allura | ForgeGit/forgegit/tests/functional/test_controllers.py | Python | apache-2.0 | 8,569 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
import webob
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron import quota
from neutron.quota import resource_registry
from neutron import wsgi
RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver'
EXTENDED_ATTRIBUTES_2_0 = {
RESOURCE_COLLECTION: {}
}
class QuotaSetsController(wsgi.Controller):
def __init__(self, plugin):
self._resource_name = RESOURCE_NAME
self._plugin = plugin
self._driver = importutils.import_class(
cfg.CONF.QUOTAS.quota_driver
)
self._update_extended_attributes = True
def _update_attributes(self):
for quota_resource in resource_registry.get_all_resources().keys():
attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
attr_dict[quota_resource] = {
'allow_post': False,
'allow_put': True,
'convert_to': attributes.convert_to_int,
'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]},
'is_visible': True}
self._update_extended_attributes = False
def _get_quotas(self, request, tenant_id):
return self._driver.get_tenant_quotas(
request.context,
resource_registry.get_all_resources(),
tenant_id)
def create(self, request, body=None):
msg = _('POST requests are not supported on this resource.')
raise webob.exc.HTTPNotImplemented(msg)
def index(self, request):
context = request.context
self._check_admin(context)
return {self._resource_name + "s":
self._driver.get_all_quotas(
context, resource_registry.get_all_resources())}
def tenant(self, request):
"""Retrieve the tenant info in context."""
context = request.context
if not context.tenant_id:
raise n_exc.QuotaMissingTenant()
return {'tenant': {'tenant_id': context.tenant_id}}
def show(self, request, id):
if id != request.context.tenant_id:
self._check_admin(request.context,
reason=_("Only admin is authorized "
"to access quotas for another tenant"))
return {self._resource_name: self._get_quotas(request, id)}
def _check_admin(self, context,
reason=_("Only admin can view or configure quota")):
if not context.is_admin:
raise n_exc.AdminRequired(reason=reason)
def delete(self, request, id):
self._check_admin(request.context)
self._driver.delete_tenant_quota(request.context, id)
def update(self, request, id, body=None):
self._check_admin(request.context)
if self._update_extended_attributes:
self._update_attributes()
body = base.Controller.prepare_request_body(
request.context, body, False, self._resource_name,
EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
for key, value in body[self._resource_name].items():
self._driver.update_quota_limit(request.context, id, key, value)
return {self._resource_name: self._get_quotas(request, id)}
class Quotasv2(extensions.ExtensionDescriptor):
"""Quotas management support."""
@classmethod
def get_name(cls):
return "Quota management support"
@classmethod
def get_alias(cls):
return RESOURCE_COLLECTION
@classmethod
def get_description(cls):
description = 'Expose functions for quotas management'
if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER:
description += ' per tenant'
return description
@classmethod
def get_updated(cls):
return "2012-07-29T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
controller = resource.Resource(
QuotaSetsController(manager.NeutronManager.get_plugin()),
faults=base.FAULT_MAP)
return [extensions.ResourceExtension(
Quotasv2.get_alias(),
controller,
collection_actions={'tenant': 'GET'})]
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apporc/neutron | neutron/extensions/quotasv2.py | Python | apache-2.0 | 5,333 |
from adt.util.prog import Prog
from adt.util.literal import Literal
from adt.util.expr import Expr
from adt.util.unary_op import Unary_op
from adt.util.binary_op import Binary_op
from adt.util.block import Block
from adt.util.context import Context
from adt.util.instr import Instr
from adt.types.bool import Bool
from adt.types.nat import Nat
from adt.types.char import Char
from adt.types.string import String
from adt.types.relative import Z
from adt.types.relative_list import List
from adt.types.map import Map
def test_eval_expr():
var = String('var')
lit1 = Literal.lit_nat(Nat(5))
lit2 = Literal.lit_nat(Nat(3))
context = Context.cons(c=Context.empty(), k=var, v=lit2)
expr = Expr.expr_lit(lit1)
# literal
assert Prog.eval_expr(expr=expr, context=context) == lit1
# variable
assert Prog.eval_expr(expr=Expr.expr_variable(var), context=context) == lit2
# unary operations
# o.not
lit_true = Literal.lit_bool(Bool.true())
lit_false = Literal.lit_bool(Bool.false())
op = Unary_op.o_not()
expr_bool = Expr.expr_unary(op=op, expr=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr_bool, context=context) == lit_false
assert Prog.eval_expr(expr=expr_bool, context=context) != lit_true
# uSub
lit_z = Literal.lit_z(Z(2))
lit_z2 = Literal.lit_z(Z(-2))
op = Unary_op.uSub()
expr_z = Expr.expr_unary(op=op, expr=Expr.expr_lit(lit_z))
assert Prog.eval_expr(expr=expr_z, context=context) == lit_z2
# binary operations
# add
lit_z1 = Literal.lit_z(Z(2))
lit_z2 = Literal.lit_z(Z(-5))
lit_z3 = Literal.lit_z(Z(-3))
op = Binary_op.add()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z1), expr2=Expr.expr_lit(lit_z2))
assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# sub
lit_z3 = Literal.lit_z(Z(7))
op = Binary_op.sub()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z1), expr2=Expr.expr_lit(lit_z2))
assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# mult
lit_z3 = Literal.lit_z(Z(10))
op = Binary_op.mult()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z1), expr2=Expr.expr_lit(lit_z2))
# assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# div
lit_z3 = Literal.lit_z(Z(2))
op = Binary_op.div()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z2), expr2=Expr.expr_lit(lit_z1))
# assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# modulo
lit_z3 = Literal.lit_z(Z(1))
op = Binary_op.modulo()
expr = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_z2), expr2=Expr.expr_lit(lit_z1))
# assert Prog.eval_expr(expr=expr, context=context) == lit_z3
# and
op = Binary_op.o_and()
expr1 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_true), expr2=Expr.expr_lit(lit_true))
expr2 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr1, context=context) == lit_true
assert Prog.eval_expr(expr=expr2, context=context) == lit_false
assert Prog.eval_expr(expr=expr2, context=context) != lit_true
# or
op = Binary_op.o_or()
expr1 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_false))
expr2 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr1, context=context) == lit_false
assert Prog.eval_expr(expr=expr2, context=context) == lit_true
assert Prog.eval_expr(expr=expr2, context=context) != lit_false
# xor
op = Binary_op.xor()
expr1 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_true), expr2=Expr.expr_lit(lit_true))
expr2 = Expr.expr_binary(op=op, expr1=Expr.expr_lit(lit_false), expr2=Expr.expr_lit(lit_true))
assert Prog.eval_expr(expr=expr1, context=context) == lit_false
assert Prog.eval_expr(expr=expr2, context=context) == lit_true
assert Prog.eval_expr(expr=expr2, context=context) != lit_false
| mencattini/ideal-pancake | adt/tests/prog_test.py | Python | apache-2.0 | 4,066 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
import mock
from mox3 import mox
from os_xenapi.client import XenAPI
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestGlanceStore, self).setUp()
self.store = glance.GlanceStore()
self.flags(api_servers=['http://localhost:9292'], group='glance')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
self.instance = {'uuid': 'blah',
'system_metadata': [],
'auto_disk_config': True,
'os_type': 'default',
'xenapi_use_agent': 'true'}
def _get_params(self):
return {'image_id': 'fake_image_uuid',
'endpoint': 'http://localhost:9292',
'sr_path': '/fake/sr/path',
'api_version': 2,
'extra_headers': {'X-Auth-Token': 'foobar',
'X-Roles': '',
'X-Tenant-Id': 'project',
'X-User-Id': 'user',
'X-Identity-Status': 'Confirmed'}}
def _get_download_params(self):
params = self._get_params()
params['uuid_stack'] = ['uuid1']
return params
def test_download_image(self):
params = self._get_download_params()
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'download_vhd2',
**params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
self.mox.VerifyAll()
@mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
@mock.patch.object(random, 'shuffle')
@mock.patch.object(time, 'sleep')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_download_image_retry(self, mock_fault, mock_sleep,
mock_shuffle, mock_make_uuid_stack):
params = self._get_download_params()
self.flags(num_retries=2, group='glance')
params.pop("endpoint")
calls = [mock.call('glance.py', 'download_vhd2',
endpoint='http://10.0.1.1:9292',
**params),
mock.call('glance.py', 'download_vhd2',
endpoint='http://10.0.0.1:9293',
**params)]
glance_api_servers = ['10.0.1.1:9292',
'http://10.0.0.1:9293']
self.flags(api_servers=glance_api_servers, group='glance')
with (mock.patch.object(self.session, 'call_plugin_serialized')
) as mock_call_plugin_serialized:
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
mock_call_plugin_serialized.side_effect = [error, "success"]
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
mock_call_plugin_serialized.assert_has_calls(calls)
self.assertEqual(1, mock_fault.call_count)
def _get_upload_params(self, auto_disk_config=True,
expected_os_type='default'):
params = self._get_params()
params['vdi_uuids'] = ['fake_vdi_uuid']
params['properties'] = {'auto_disk_config': auto_disk_config,
'os_type': expected_os_type}
return params
def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
params = self._get_upload_params(auto_disk_config, expected_os_type)
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image(self):
self._test_upload_image(True)
def test_upload_image_None_os_type(self):
self.instance['os_type'] = None
self._test_upload_image(True, 'linux')
def test_upload_image_no_os_type(self):
del self.instance['os_type']
self._test_upload_image(True, 'linux')
def test_upload_image_auto_config_disk_disabled(self):
sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
self.instance["system_metadata"] = sys_meta
self._test_upload_image("disabled")
def test_upload_image_raises_exception(self):
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_then_raises_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
self.mox.ReplayAll()
self.assertRaises(exception.CouldNotUploadImage,
self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_on_signal_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "task signaled", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
# Note(johngarbutt) XenServer 6.1 and later has this error
error_details = ["", "signal: SIGTERM", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (XenAPI.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance.py', 'upload_vhd2',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
| hanlind/nova | nova/tests/unit/virt/xenapi/image/test_glance.py | Python | apache-2.0 | 10,892 |
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# Copyright (c) 2015 Juniper Networks, Inc.
# All rights reserved.
#
# Use is subject to license terms.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module defines the Connection class.
"""
from __future__ import unicode_literals
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import requests
import logging
class Connection(object):
""" Creates a connection to Space Platform mimicking a GUI login.
This class is **not** thread-safe. It is up to the users of the class to
ensure thread safety. The ``rest.Space`` class uses this class for
supporting session-based connections to Junos Space. Thread-safety
requirements are met by that class.
"""
def __init__(self,
homeurl,
username=None,
password=None,
cert=None,
our_ip=None):
self._logger = logging.getLogger('root')
self.homeurl = homeurl + '/mainui'
self.authurl = homeurl + '/mainui/j_security_check'
self.session = None
if username is not None:
if password is None:
raise ValueError('password is mandatory along with username')
if cert is not None:
raise ValueError('You must provide only one of username+password or cert')
else:
if password is not None:
raise ValueError('password is valid only along with username')
if cert is None:
raise ValueError('You must provide one of username+password or cert')
self.username = username
self.password = password
self.our_ip = our_ip
self.cert = cert
self._logger.debug("Connection: Initiating login to %s", self.homeurl)
self.login()
def login(self):
""" Login to Space """
self.session = requests.Session()
sess = self.session
if self.our_ip is None:
resp = sess.get(self.homeurl, cert=self.cert, verify=False)
#self._logger.debug(resp.status_code)
#self._logger.debug(resp.headers)
#self._logger.debug(resp.text)
# Extract the ipAddr and code variables embbed in the form validation code
ip_addr_start_idx = resp.text.find("var ipAddr = ")
if ip_addr_start_idx < 0:
self.check_login_status()
return
ip_addr_end_idx = resp.text.find("\n", ip_addr_start_idx)
ip_addr_line = resp.text[ip_addr_start_idx : ip_addr_end_idx]
ip_addr_items = ip_addr_line.split("=", 2)
ip_addr = ip_addr_items[1].strip("'; ").strip()
#codeStartIdx = r.text.find("var code = ", ip_addr_end_idx);
#codeEndIdx = r.text.find("\n", codeStartIdx);
#codeLine = r.text[codeStartIdx : codeEndIdx]
#codeItems = codeLine.split("=", 2);
#code = codeItems[1].strip("'; ").strip();'''
#form_username = self.username + '%' + code + '@' + ip_addr;
else:
resp = sess.get(self.homeurl, cert=self.cert, verify=False)
ip_addr = self.our_ip
form_username = self.username + '@' + ip_addr
data = {
"j_screen_username" : self.username,
"j_username" : form_username,
"j_password" : self.password
}
self._logger.debug(data)
resp = sess.post(self.authurl, data=data, cert=self.cert, verify=False)
#self._logger.debug(resp.status_code)
#self._logger.debug(resp.headers)
#self._logger.debug(resp.text)
self.check_login_status()
def is_logged_in(self):
""" Checks if a login has been established """
return self.session is not None
def check_login_status(self):
""" Check login-status """
if not self.is_logged_in():
raise Exception("Not logged in")
resp = self.session.get(self.homeurl, verify=False)
ip_addr_start_idx = resp.text.find("var ipAddr = ")
if ip_addr_start_idx >= 0:
raise Exception("Not in a logged-in session.")
def get_session(self):
""" Return the HTTP session object """
if self.is_logged_in():
return self.session
else:
raise Exception("Not logged in")
def logout(self):
""" Logout from Space Server """
logout_url = self.homeurl + "/unsecured/logout.jsp"
resp = self.session.get(logout_url, verify=False)
#self._logger.debug(resp.status_code)
#self._logger.debug(resp.headers)
#self._logger.debug(resp.text)
if resp.status_code == 200:
self.session = None
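# Hypothetical usage sketch (not part of the original module); the URL and
# credentials below are placeholders. A username/password login is shown here;
# a client certificate could be supplied via the "cert" argument instead.
#
#   conn = Connection('https://space.example.com', username='admin',
#                     password='secret')
#   session = conn.get_session()   # requests.Session carrying the login state
#   conn.logout()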
| Juniper/py-space-platform | jnpr/space/connection.py | Python | apache-2.0 | 5,362 |
import Utils
from Utils import printe
class CommandBuilder(object):
def __init__(self, *command_args):
self.command_args = list(command_args)
def append(self, *args):
for arg in args:
if isinstance(arg, str):
self.command_args += [arg]
elif isinstance(arg, list) or isinstance(arg, tuple):
for sub_arg in arg:
self.append(sub_arg)
else:
printe('Error appending argument of unknown type: {}'.format(
str(type(arg))), terminate=True)
return self
def debug(self):
return Utils.debug(*self.command_args)
def run(self, replaceForeground=False):
return Utils.run(*self.command_args,
replaceForeground=replaceForeground)
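# A hypothetical sketch (not part of the original module): append() accepts
# strings as well as (nested) lists/tuples, so arguments can be accumulated
# incrementally before calling run(). The docker arguments are invented.
def _example_command_builder_usage():
    builder = CommandBuilder('docker', 'run')
    builder.append('--rm', ['-it', ('ubuntu', 'bash')])
    return builder.command_args  # ['docker', 'run', '--rm', '-it', 'ubuntu', 'bash']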
| bytejive/lazy-docker | CommandBuilder.py | Python | apache-2.0 | 828 |
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import re
import socket
import sys
import tempfile
from datetime import datetime
from subprocess import CalledProcessError
from subprocess import check_output, STDOUT
import termios
import json
import logging
from pprint import pformat
import yaml
from deepdiff import DeepDiff
LOCAL_IP_ENV = "MY_IP"
LOCAL_IPv6_ENV = "MY_IPv6"
logger = logging.getLogger(__name__)
ETCD_SCHEME = os.environ.get("ETCD_SCHEME", "http")
ETCD_CA = os.environ.get("ETCD_CA_CERT_FILE", "")
ETCD_CERT = os.environ.get("ETCD_CERT_FILE", "")
ETCD_KEY = os.environ.get("ETCD_KEY_FILE", "")
ETCD_HOSTNAME_SSL = "etcd-authority-ssl"
KUBECONFIG = "/home/user/certs/kubeconfig"
API_VERSION = 'projectcalico.org/v3'
ERROR_CONFLICT = "update conflict"
NOT_FOUND = "resource does not exist"
NOT_NAMESPACED = "is not namespaced"
SET_DEFAULT = "Cannot set"
NOT_SUPPORTED = "is not supported on"
KUBERNETES_NP = "kubernetes network policies must be managed through the kubernetes API"
NOT_LOCKED = "Datastore is not locked. Run the `calicoctl datastore migrate lock` command in order to begin migration."
NOT_KUBERNETES = "Invalid datastore type: etcdv3 to import to for datastore migration. Datastore type must be kubernetes"
NO_IPAM = "No IPAM resources specified in file"
class CalicoctlOutput:
"""
CalicoctlOutput contains the output from running a calicoctl command using
the calicoctl function below.
This class contains the command, output and error code (if it failed)
along with YAML/JSON decoded output if the output could be decoded.
"""
def __init__(self, command, output, error=None):
self.command = command
self.output = output
self.error = error
# Attempt to decode the output and store the output format.
self.decoded, self.decoded_format = decode_json_yaml(self.output)
def assert_data(self, data, format="yaml", text=None):
"""
Assert the decoded output from the calicoctl command matches the
supplied data and the expected decoder format.
Args:
data: The data to compare
format: The expected output format of the data.
text: (optional) Expected text in the command output.
"""
self.assert_no_error(text)
assert self.decoded is not None, "No value was decoded from calicoctl response."
if isinstance(data, str):
data, _ = decode_json_yaml(data)
assert data is not None, "String data did not decode"
if format is not None:
assert format == self.decoded_format, "Decoded format is different. " \
"expect %s; got %s" % (format, self.decoded_format)
# Copy and clean the decoded data to allow it to be comparable.
cleaned = clean_calico_data(self.decoded)
assert cmp(cleaned, data) == 0, \
"Items are not the same. Difference is:\n %s" % \
pformat(DeepDiff(cleaned, data), indent=2)
def assert_empty_list(self, kind, format="yaml", text=None):
"""
Assert the calicoctl command output an empty list of the specified
kind.
Args:
kind: The resource kind.
format: The expected output format of the data.
text: (optional) Expected text in the command output.
Returns:
"""
data = make_list(kind, [])
self.assert_data(data, format=format, text=text)
def assert_list(self, kind, items, format="yaml", text=None):
"""
Assert the calicoctl command output a list of the specified
kind.
Args:
kind: The resource kind.
items: A list of the items in the list.
format: The expected output format of the data.
text: (optional) Expected text in the command output.
Returns:
"""
data = make_list(kind, items)
self.assert_data(data, format=format, text=text)
def assert_error(self, text=None):
"""
Assert the calicoctl command exited with an error and did not panic
Args:
text: (optional) Expected text in the command output.
"""
assert self.error, "Expected error running command; \n" \
"command=" + self.command + "\noutput=" + self.output
assert not "panic" in self.output, "Exited with an error due to a panic"
self.assert_output_contains(text)
def assert_no_error(self, text=None):
"""
Assert the calicoctl command did not exit with an error code.
Args:
text: (optional) Expected text in the command output.
"""
assert not self.error, "Expected no error running command; \n" \
"command=" + self.command + "\noutput=" + self.output
# If text is supplied, assert it appears in the output
if text:
self.assert_output_contains(text)
def assert_output_equals(self, text):
"""
Assert the calicoctl command output is exactly the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
assert text == self.output, "Expected output to exactly match; \n" + \
"command=" + self.command + "\noutput=\n" + self.output + \
"\nexpected=\n" + text
def assert_output_equals_ignore_res_version(self, text):
"""
Assert the calicoctl command output is exactly the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
text = re.sub('resourceVersion: ".*?"', 'resourceVersion: "<ignored>"', text)
out = re.sub('resourceVersion: ".*?"', 'resourceVersion: "<ignored>"', self.output)
assert text == out, "Expected output to match after ignoring resource version; \n" + \
"command=" + self.command + "\noutput=\n" + out + \
"\nexpected=\n" + text
def assert_output_contains(self, text):
"""
Assert the calicoctl command output contains the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
assert text in self.output, "Expected text in output; \n" + \
"command=" + self.command + "\noutput=\n" + self.output + \
"\nexpected=\n" + text
def assert_output_not_contains(self, text):
"""
Assert the calicoctl command output does not contain the supplied text.
Args:
text: Expected text in the command output.
"""
if not text:
return
assert not text in self.output, "Unexpected text in output; \n" + \
"command=" + self.command + "\noutput=\n" + self.output + \
"\nunexpected=\n" + text
def calicoctl(command, data=None, load_as_stdin=False, format="yaml", only_stdout=False, no_config=False, kdd=False, allowVersionMismatch=True):
"""
Convenience function for abstracting away calling the calicoctl
command.
:param command: The calicoctl command line parms as a single string.
:param data: Input data either as a string or a JSON serializable Python
object.
:param load_as_stdin: Load the input data through stdin rather than by
loading from file.
:param format: Specify the format for loading the data.
:param only_stdout: Return only the stdout
:param no_config: Run calicoctl without exporting any datastore configuration environment variables.
:param kdd: Use the kubernetes datastore driver (via KUBECONFIG) instead of etcdv3.
:param allowVersionMismatch: Pass --allow-version-mismatch to calicoctl.
:return: The output from the command with leading and trailing
whitespace removed.
"""
# If input data is specified, save it to file in the required format.
if isinstance(data, str):
data, _ = decode_json_yaml(data)
assert data is not None, "String data did not decode"
if data is not None:
if format == "yaml":
writeyaml("/tmp/input-data", data)
else:
writejson("/tmp/input-data", data)
stdin = ''
option_file = ''
if data and load_as_stdin:
stdin = 'cat /tmp/input-data | '
option_file = ' -f -'
elif data and not load_as_stdin:
option_file = ' -f /tmp/input-data'
calicoctl_bin = os.environ.get("CALICOCTL", "/code/bin/calicoctl-linux-amd64")
if allowVersionMismatch:
calicoctl_bin += " --allow-version-mismatch"
if ETCD_SCHEME == "https":
etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL
else:
etcd_auth = "%s:2379" % get_ip()
# Export the environment, in case the command has multiple parts, e.g.
# use of | or ;
#
# Pass in all etcd params, the values will be empty if not set anyway
calicoctl_env_cmd = "export ETCD_ENDPOINTS=%s; " \
"export ETCD_CA_CERT_FILE=%s; " \
"export ETCD_CERT_FILE=%s; " \
"export ETCD_KEY_FILE=%s; " \
"export DATASTORE_TYPE=%s; %s %s" % \
(ETCD_SCHEME+"://"+etcd_auth, ETCD_CA, ETCD_CERT, ETCD_KEY,
"etcdv3", stdin, calicoctl_bin)
if kdd:
calicoctl_env_cmd = "export DATASTORE_TYPE=kubernetes; " \
"export KUBECONFIG=%s; %s %s" % \
(KUBECONFIG, stdin, calicoctl_bin)
if no_config :
calicoctl_env_cmd = calicoctl_bin
full_cmd = calicoctl_env_cmd + " " + command + option_file
try:
output = log_and_run(full_cmd, stderr=(None if only_stdout else STDOUT))
return CalicoctlOutput(full_cmd, output)
except CalledProcessError as e:
return CalicoctlOutput(full_cmd, e.output, error=e.returncode)
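# Hypothetical usage sketch (not part of the original helpers); the resource
# data is assumed to be defined elsewhere. It shows the typical pattern of
# running a command and asserting on the result object:
#
#   res = calicoctl("create", data=some_profile_dict)
#   res.assert_no_error()
#   res = calicoctl("get profile %s -o yaml" % name(some_profile_dict))
#   res.assert_data(some_profile_dict)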
def clean_calico_data(data, extra_keys_to_remove=None):
"""
Clean the data returned from a calicoctl get command to remove empty
structs, null values and non-configurable fields. This makes comparison
with the input data much simpler.
Args:
data: The data to clean.
extra_keys_to_remove: more keys to remove if needed.
Returns: The cleaned data.
"""
new = copy.deepcopy(data)
# Recursively delete empty structs / nil values and non-configurable
# fields.
def clean_elem(elem, extra_keys):
if isinstance(elem, list):
# Loop through each element in the list
for i in elem:
clean_elem(i, extra_keys)
if isinstance(elem, dict):
# Remove non-settable fields, and recursively clean each value of
# the dictionary, removing nil values or values that are empty
# dicts after cleaning.
del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
if extra_keys is not None:
for extra_key in extra_keys:
del_keys.append(extra_key)
for k, v in elem.iteritems():
clean_elem(v, extra_keys)
if v is None or v == {}:
del_keys.append(k)
for k in del_keys:
if k in elem:
del(elem[k])
clean_elem(new, extra_keys_to_remove)
return new
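# A hypothetical example (not part of the original helpers) of what cleaning
# removes: server-populated metadata fields and empty/None values. The field
# values below are invented.
def _example_clean_calico_data():
    raw = {'metadata': {'name': 'example', 'uid': 'abc', 'resourceVersion': '1'},
           'spec': {'order': None, 'selector': {}}}
    return clean_calico_data(raw)  # -> {'metadata': {'name': 'example'}}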
def decode_json_yaml(value):
try:
decoded = json.loads(value)
# fix the python datetime back into isoformat with empty timezone information
decoded = find_and_format_creation_timestamp(decoded)
return decoded, "json"
except ValueError:
pass
try:
decoded = yaml.safe_load(value)
# fix the python datetime back into isoformat with empty timezone information
decoded = find_and_format_creation_timestamp(decoded)
return decoded, "yaml"
except yaml.YAMLError:
pass
return None, None
def find_and_format_creation_timestamp(decoded):
if decoded:
if 'items' in decoded:
for i in xrange(len(decoded['items'])):
decoded['items'][i] = format_creation_timestamp(decoded['items'][i])
else:
decoded = format_creation_timestamp(decoded)
return decoded
def format_creation_timestamp(decoded):
if isinstance(decoded, dict) and 'metadata' in decoded and 'creationTimestamp' in decoded['metadata']:
if isinstance(decoded['metadata']['creationTimestamp'], datetime):
decoded['metadata']['creationTimestamp'] = decoded.get('metadata', {}). \
get('creationTimestamp', datetime.utcnow()).isoformat() + 'Z'
return decoded
def writeyaml(filename, data):
"""
Converts a python dict to yaml and outputs to a file.
:param filename: filename to write
:param data: dictionary to write out as yaml
"""
with open(filename, 'w') as f:
text = yaml.dump(data, default_flow_style=False)
logger.debug("Writing %s: \n%s" % (filename, truncate_for_log(text, 4000)))
f.write(text)
def writejson(filename, data):
"""
Converts a python dict to json and outputs to a file.
:param filename: filename to write
:param data: dictionary to write out as json
"""
with open(filename, 'w') as f:
text = json.dumps(data,
sort_keys=True,
indent=2,
separators=(',', ': '))
logger.debug("Writing %s: \n%s" % (filename, truncate_for_log(text, 4000)))
f.write(text)
def truncate_for_log(text, length):
if len(text) <=length:
return text
return text[:length] + "... <truncated>"
def get_ip(v6=False):
"""
Return a string of the IP of the hosts interface.
Try to get the local IP from the environment variables. This allows
testers to specify the IP address in cases where there is more than one
configured IP address for the test system.
"""
env = LOCAL_IPv6_ENV if v6 else LOCAL_IP_ENV
ip = os.environ.get(env)
if not ip:
logger.debug("%s not set; try to auto detect IP.", env)
socket_type = socket.AF_INET6 if v6 else socket.AF_INET
s = socket.socket(socket_type, socket.SOCK_DGRAM)
remote_ip = "2001:4860:4860::8888" if v6 else "8.8.8.8"
s.connect((remote_ip, 0))
ip = s.getsockname()[0]
s.close()
else:
logger.debug("Got local IP from %s=%s", env, ip)
return ip
# Some of the commands we execute like to mess with the TTY configuration,
# which can break the output formatting. As a workaround, save off the
# terminal settings and restore them after each command.
_term_settings = termios.tcgetattr(sys.stdin.fileno())
def log_and_run(command, raise_exception_on_failure=True, stderr=STDOUT):
def log_output(results):
if results is None:
logger.info(" # <no output>")
lines = results.split("\n")
for line in lines:
logger.info(" # %s", line.rstrip())
try:
logger.info("%s", command)
try:
results = check_output(command, shell=True, stderr=stderr).rstrip()
finally:
# Restore terminal settings in case the command we ran manipulated
# them. Note: under concurrent access, this is still not a perfect
# solution since another thread's child process may break the
# settings again before we log below.
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, _term_settings)
log_output(results)
return results
except CalledProcessError as e:
# Wrap the original exception with one that gives a better error
# message (including command output).
logger.info(" # Return code: %s", e.returncode)
log_output(e.output)
if raise_exception_on_failure:
raise e
def curl_etcd(path, options=None, recursive=True, ip=None):
"""
Perform a curl to etcd, returning JSON decoded response.
    :param path: The key path to query
    :param options: Additional options to include in the curl
    :param recursive: Whether we want recursive query or not
    :param ip: IP address of the etcd server (used when etcd is not running
        with SSL/TLS)
    :return: The JSON decoded response.
"""
if options is None:
options = []
if ETCD_SCHEME == "https":
# Etcd is running with SSL/TLS, require key/certificates
rc = check_output(
"curl --cacert %s --cert %s --key %s "
"-sL https://%s:2379/v2/keys/%s?recursive=%s %s"
% (ETCD_CA, ETCD_CERT, ETCD_KEY, ETCD_HOSTNAME_SSL,
path, str(recursive).lower(), " ".join(options)),
shell=True)
else:
rc = check_output(
"curl -sL http://%s:2379/v2/keys/%s?recursive=%s %s"
% (ip, path, str(recursive).lower(), " ".join(options)),
shell=True)
logger.info("etcd RC: %s" % rc.strip())
return json.loads(rc.strip())
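# Illustrative sketch of typical calls (paths and address are hypothetical):
# read a subtree non-recursively, or delete a key by passing extra curl
# options, mirroring how wipe_etcd() below drives this helper.
#
#   data = curl_etcd("calico/v1/ipam", recursive=False, ip="10.0.0.5")
#   curl_etcd("calico/old-key", options=["-XDELETE"], ip="10.0.0.5")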
def wipe_etcd(ip):
# Delete /calico if it exists. This ensures each test has an empty data
# store at start of day.
curl_etcd("calico", options=["-XDELETE"], ip=ip)
# Disable Usage Reporting to usage.projectcalico.org
# We want to avoid polluting analytics data with unit test noise
curl_etcd("calico/v1/config/UsageReportingEnabled",
options=["-XPUT -d value=False"], ip=ip)
etcd_container_name = "calico-etcd"
tls_vars = ""
if ETCD_SCHEME == "https":
# Etcd is running with SSL/TLS, require key/certificates
etcd_container_name = "calico-etcd-ssl"
tls_vars = ("ETCDCTL_CACERT=/etc/calico/certs/ca.pem " +
"ETCDCTL_CERT=/etc/calico/certs/client.pem " +
"ETCDCTL_KEY=/etc/calico/certs/client-key.pem ")
check_output("docker exec " + etcd_container_name + " sh -c '" + tls_vars +
"ETCDCTL_API=3 etcdctl del --prefix /calico" +
"'", shell=True)
def make_list(kind, items):
"""
Convert the list of resources into a single List resource type.
Args:
items: A list of the resources in the List object.
Returns:
None
"""
assert isinstance(items, list)
if "List" not in kind:
kind = kind + "List"
return {
'kind': kind,
'apiVersion': API_VERSION,
'items': items,
}
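# Illustrative sketch (hypothetical resources): wrapping two NetworkPolicy
# dicts produces a single NetworkPolicyList using the module's API_VERSION.
#
#   make_list("NetworkPolicy", [policy1, policy2])
#   # => {'kind': 'NetworkPolicyList', 'apiVersion': API_VERSION,
#   #     'items': [policy1, policy2]}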
def name(data):
"""
Returns the name of the resource in the supplied data
Args:
data: A dictionary containing the resource.
Returns: The resource name.
"""
return data['metadata']['name']
def namespace(data):
"""
Returns the namespace of the resource in the supplied data
Args:
data: A dictionary containing the resource.
    Returns: The resource namespace.
"""
return data['metadata']['namespace']
def set_cluster_version(calico_version="", kdd=False):
"""
Set Calico version in ClusterInformation using the calico_version_helper go app.
Args:
calico_version: string with version to set
kdd: optional bool to indicate use of kubernetes datastore (default False)
Returns: The command output
"""
if ETCD_SCHEME == "https":
etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL
else:
etcd_auth = "%s:2379" % get_ip()
calico_helper_bin = "/code/tests/fv/helper/bin/calico_version_helper"
full_cmd = "export ETCD_ENDPOINTS=%s; " \
"export ETCD_CA_CERT_FILE=%s; " \
"export ETCD_CERT_FILE=%s; " \
"export ETCD_KEY_FILE=%s; " \
"export DATASTORE_TYPE=%s; %s" % \
(ETCD_SCHEME+"://"+etcd_auth, ETCD_CA, ETCD_CERT, ETCD_KEY,
"etcdv3", calico_helper_bin)
if kdd:
full_cmd = "export DATASTORE_TYPE=kubernetes; " \
"export KUBECONFIG=%s; %s" % \
(KUBECONFIG, calico_helper_bin)
if calico_version:
full_cmd += " -v " + calico_version
try:
output = log_and_run(full_cmd, stderr=STDOUT)
return CalicoctlOutput(full_cmd, output)
except CalledProcessError as e:
return CalicoctlOutput(full_cmd, e.output, error=e.returncode)
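# Illustrative sketch (the version string is hypothetical): set a cluster
# version against the etcd datastore, or against the kubernetes datastore by
# passing kdd=True.
#
#   set_cluster_version("v3.1.0")
#   set_cluster_version("v3.1.0", kdd=True)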
| projectcalico/calico | calicoctl/tests/st/utils/utils.py | Python | apache-2.0 | 20,521 |
# Management commands typically run before serving static files through the
# URL patterns below:
#   python manage.py collectstatic
#   python manage.py runserver --nostatic
import os
urlpatterns += patterns('',
(r'^static/suit/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.DJANGO_SUIT_TEMPLATE}),
)
urlpatterns += patterns('',
(r'^static/admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.DJANGO_ADMIN_TEMPLATE}),
)
SITE_PATH = os.path.dirname(__file__)
REPO_ROOT = os.path.normpath(os.path.join(SITE_PATH, '..'))
MEDIA_ROOT = os.path.join(REPO_ROOT, 'public/media')
DJANGO_SUIT_TEMPLATE = os.path.join(REPO_ROOT, 'static/suit')
DJANGO_EDITOR = os.path.join(REPO_ROOT, 'static/django_summernote')
DJANGO_ADMIN_TEMPLATE = os.path.join(REPO_ROOT, 'static/admin')
| pisun2/python | static.py | Python | apache-2.0 | 711 |
#!/usr/bin/env python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from importlib import import_module
import logging
import os
import sys
import click
from colorlog import ColoredFormatter
logger = logging.getLogger(__name__)
def setup_logging(): # pragma: no cover
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(asctime)s %(green)s%(name)s"
"%(reset)s %(message)s",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'blue',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}
)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def import_queue(location):
module, attr = location.rsplit('.', 1)
module = import_module(module)
queue = getattr(module, attr)
if hasattr(queue, '__call__'):
queue = queue()
return queue
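# Illustrative sketch (hypothetical module path): for "myapp.queues.fast_queue",
# import_queue() imports myapp.queues and returns its fast_queue attribute,
# calling it first if it is a factory function rather than a Queue instance.
#
#   q = import_queue("myapp.queues.fast_queue")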
@click.command()
@click.option(
'--path', '-p',
help='Import path. By default, this is the current working directory.')
@click.option(
'--pid',
help='Write the process ID to the specified file.')
@click.argument(
'queue',
nargs=1,
required=True)
def main(path, pid, queue):
"""
Standalone PSQ worker.
The queue argument must be the full importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
"""
setup_logging()
if pid:
with open(os.path.expanduser(pid), "w") as f:
f.write(str(os.getpid()))
if not path:
path = os.getcwd()
sys.path.insert(0, path)
queue = import_queue(queue)
import psq
worker = psq.Worker(queue=queue)
worker.listen()
if __name__ == '__main__':
main()
| GoogleCloudPlatform/psq | psq/psqworker.py | Python | apache-2.0 | 2,499 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import fixtures
from lxml import etree
from oslo_config import cfg
import requests
import testtools
from testtools import content as test_content
from testtools import matchers
import urllib.parse as urlparse
from os_collect_config import cfn
from os_collect_config import collect
from os_collect_config import exc
META_DATA = {u'int1': 1,
u'strfoo': u'foo',
u'map_ab': {
u'a': 'apple',
u'b': 'banana',
}}
SOFTWARE_CONFIG_DATA = {
u'old-style': u'value',
u'deployments': [
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'Heat::Ungrouped',
u'name': 'dep-name1',
u'outputs': None,
u'options': None,
u'config': {
u'config1': 'value1'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'os-apply-config',
u'name': 'dep-name2',
u'outputs': None,
u'options': None,
u'config': {
u'config2': 'value2'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'name': 'dep-name3',
u'outputs': None,
u'options': None,
u'config': {
u'config3': 'value3'
}
},
{
u'inputs': [],
u'group': 'ignore_me',
u'name': 'ignore_me_name',
u'outputs': None,
u'options': None,
u'config': 'ignore_me_config'
}
]
}
SOFTWARE_CONFIG_IMPOSTER_DATA = {
u'old-style': u'value',
u'deployments': {
u"not": u"a list"
}
}
class FakeResponse(dict):
def __init__(self, text):
self.text = text
def raise_for_status(self):
pass
class FakeReqSession(object):
SESSION_META_DATA = META_DATA
def __init__(self, testcase, expected_netloc):
self._test = testcase
self._expected_netloc = expected_netloc
self.verify = False
def get(self, url, params, headers, verify=None, timeout=None):
self._test.addDetail('url', test_content.text_content(url))
url = urlparse.urlparse(url)
self._test.assertEqual(self._expected_netloc, url.netloc)
self._test.assertEqual('/v1/', url.path)
self._test.assertEqual('application/json',
headers['Content-Type'])
self._test.assertIn('SignatureVersion', params)
self._test.assertEqual('2', params['SignatureVersion'])
self._test.assertIn('Signature', params)
self._test.assertIn('Action', params)
self._test.assertEqual('DescribeStackResource',
params['Action'])
self._test.assertIn('LogicalResourceId', params)
self._test.assertEqual('foo', params['LogicalResourceId'])
self._test.assertEqual(10, timeout)
root = etree.Element('DescribeStackResourceResponse')
result = etree.SubElement(root, 'DescribeStackResourceResult')
detail = etree.SubElement(result, 'StackResourceDetail')
metadata = etree.SubElement(detail, 'Metadata')
metadata.text = json.dumps(self.SESSION_META_DATA)
if verify is not None:
self.verify = True
return FakeResponse(etree.tostring(root))
class FakeRequests(object):
exceptions = requests.exceptions
def __init__(self, testcase, expected_netloc='192.0.2.1:8000'):
self._test = testcase
self._expected_netloc = expected_netloc
def Session(self):
return FakeReqSession(self._test, self._expected_netloc)
class FakeReqSessionSoftwareConfig(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_DATA
class FakeRequestsSoftwareConfig(FakeRequests):
FAKE_SESSION = FakeReqSessionSoftwareConfig
def Session(self):
return self.FAKE_SESSION(self._test, self._expected_netloc)
class FakeReqSessionConfigImposter(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_IMPOSTER_DATA
class FakeRequestsConfigImposter(FakeRequestsSoftwareConfig):
FAKE_SESSION = FakeReqSessionConfigImposter
class FakeFailRequests(object):
exceptions = requests.exceptions
class Session(object):
def get(self, url, params, headers, verify=None, timeout=None):
raise requests.exceptions.HTTPError(403, 'Forbidden')
class TestCfnBase(testtools.TestCase):
def setUp(self):
super(TestCfnBase, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
self.useFixture(fixtures.NestedTempfile())
self.hint_file = tempfile.NamedTemporaryFile()
self.hint_file.write(u'http://192.0.2.1:8000'.encode('utf-8'))
self.hint_file.flush()
self.addCleanup(self.hint_file.close)
collect.setup_conf()
cfg.CONF.cfn.heat_metadata_hint = self.hint_file.name
cfg.CONF.cfn.metadata_url = None
cfg.CONF.cfn.path = ['foo.Metadata']
cfg.CONF.cfn.access_key_id = '0123456789ABCDEF'
cfg.CONF.cfn.secret_access_key = 'FEDCBA9876543210'
class TestCfn(TestCfnBase):
def test_collect_cfn(self):
cfn_md = cfn.Collector(requests_impl=FakeRequests(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_md = cfn_md[0][1]
for k in ('int1', 'strfoo', 'map_ab'):
self.assertIn(k, cfn_md)
self.assertEqual(cfn_md[k], META_DATA[k])
self.assertEqual('', self.log.output)
def test_collect_with_ca_cert(self):
cfn.CONF.cfn.ca_certificate = "foo"
collector = cfn.Collector(requests_impl=FakeRequests(self))
collector.collect()
self.assertTrue(collector._session.verify)
def test_collect_cfn_fail(self):
cfn_collect = cfn.Collector(requests_impl=FakeFailRequests)
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Forbidden', self.log.output)
def test_collect_cfn_no_path(self):
cfg.CONF.cfn.path = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No path configured', self.log.output)
def test_collect_cfn_bad_path(self):
cfg.CONF.cfn.path = ['foo']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('Path not in format', self.log.output)
def test_collect_cfn_no_metadata_url(self):
cfg.CONF.cfn.heat_metadata_hint = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No metadata_url configured', self.log.output)
def test_collect_cfn_missing_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.not_there']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Sub-key not_there does not exist', self.log.output)
def test_collect_cfn_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.map_ab']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
content = cfn_collect.collect()
self.assertThat(content, matchers.IsInstance(list))
self.assertEqual('cfn', content[0][0])
content = content[0][1]
self.assertIn(u'b', content)
self.assertEqual(u'banana', content[u'b'])
def test_collect_cfn_metadata_url_overrides_hint(self):
cfg.CONF.cfn.metadata_url = 'http://127.0.1.1:8000/v1/'
cfn_collect = cfn.Collector(
requests_impl=FakeRequests(self,
expected_netloc='127.0.1.1:8000'))
cfn_collect.collect()
class TestCfnSoftwareConfig(TestCfnBase):
def test_collect_cfn_software_config(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsSoftwareConfig(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_config = cfn_md[0][1]
self.assertThat(cfn_config, matchers.IsInstance(dict))
self.assertEqual(set(['old-style', 'deployments']),
set(cfn_config.keys()))
self.assertIn('deployments', cfn_config)
self.assertThat(cfn_config['deployments'], matchers.IsInstance(list))
self.assertEqual(4, len(cfn_config['deployments']))
deployment = cfn_config['deployments'][0]
self.assertIn('inputs', deployment)
self.assertThat(deployment['inputs'], matchers.IsInstance(list))
self.assertEqual(1, len(deployment['inputs']))
self.assertEqual('dep-name1', cfn_md[1][0])
self.assertEqual('value1', cfn_md[1][1]['config1'])
self.assertEqual('dep-name2', cfn_md[2][0])
self.assertEqual('value2', cfn_md[2][1]['config2'])
def test_collect_cfn_deployments_not_list(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsConfigImposter(self)).collect()
self.assertEqual(1, len(cfn_md))
self.assertEqual('cfn', cfn_md[0][0])
self.assertIn('not', cfn_md[0][1]['deployments'])
self.assertEqual('a list', cfn_md[0][1]['deployments']['not'])
| openstack/os-collect-config | os_collect_config/tests/test_cfn.py | Python | apache-2.0 | 10,601 |
import streamcorpus as sc
import cuttsum.events
import cuttsum.corpora
from cuttsum.trecdata import SCChunkResource
from cuttsum.pipeline import ArticlesResource, DedupedArticlesResource
import os
import pandas as pd
from datetime import datetime
from collections import defaultdict
import matplotlib.pylab as plt
plt.style.use('ggplot')
pd.set_option('display.max_rows', 500)
pd.set_option('display.width', 200)
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
def format_int(x):
return locale.format("%d", x, grouping=True)
def epoch(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
chunk_res = SCChunkResource()
articles_res = ArticlesResource()
ded_articles_res = DedupedArticlesResource()
data = []
event2ids = defaultdict(set)
fltr_event2ids = defaultdict(set)
for event in cuttsum.events.get_events():
corpus = cuttsum.corpora.get_raw_corpus(event)
hours = event.list_event_hours()
hour2ded = defaultdict(int)
hour2ded_fltr = defaultdict(int)
ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
if ded_df is not None:
if event.query_num > 25:
for ids in ded_df["stream ids"].apply(eval).tolist():
for id1 in ids:
event2ids[event.fs_name()].add(id1)
for _, row in ded_df.iterrows():
dt = datetime.utcfromtimestamp(row["earliest"])
hour = datetime(dt.year, dt.month, dt.day, dt.hour)
hour2ded[hour] += 1
if row["match"] == True:
hour2ded_fltr[hour] += 1
hour2goose = defaultdict(int)
for hour in hours:
path = articles_res.get_chunk_path(event, "goose", hour, corpus)
if path is None:
continue
#print path
fname = os.path.split(path)[1]
num_goose = int(fname.split("-")[0])
hour2goose[hour] = num_goose
# goose_df = articles_res.get_stats_df(event, "goose")
# if goose_df is not None:
# for _, row in goose_df.iterrows():
# dt = datetime.utcfromtimestamp(row["hour"])
# hour = datetime(dt.year, dt.month, dt.day, dt.hour)
# hour2goose[hour] = row["goose articles"]
for hour in hours:
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
num_raw_si = 0
for chunk in raw_chunks:
fname = os.path.split(chunk)[1]
num_raw_si += int(fname.split("-")[1])
#num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
data.append({
"event": event.query_id,
"title": event.title,
"hour": hour,
"raw articles": num_raw_si,
"goose articles": hour2goose[hour],
"deduped articles": hour2ded[hour],
"deduped match articles": hour2ded_fltr[hour],
})
for event in cuttsum.events.get_events():
if event.query_num < 26: continue
corpus = cuttsum.corpora.FilteredTS2015()
hours = event.list_event_hours()
hour2ded = defaultdict(int)
hour2ded_fltr = defaultdict(int)
ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
if ded_df is not None:
for ids in ded_df["stream ids"].apply(eval).tolist():
for id1 in ids:
fltr_event2ids[event.fs_name()].add(id1)
for _, row in ded_df.iterrows():
dt = datetime.utcfromtimestamp(row["earliest"])
hour = datetime(dt.year, dt.month, dt.day, dt.hour)
hour2ded[hour] += 1
if row["match"] == True:
hour2ded_fltr[hour] += 1
hour2goose = defaultdict(int)
for hour in hours:
path = articles_res.get_chunk_path(event, "goose", hour, corpus)
if path is None:
continue
print path
fname = os.path.split(path)[1]
num_goose = int(fname.split("-")[0])
hour2goose[hour] = num_goose
# goose_df = articles_res.get_stats_df(event, "goose")
# if goose_df is not None:
# for _, row in goose_df.iterrows():
# dt = datetime.utcfromtimestamp(row["hour"])
# hour = datetime(dt.year, dt.month, dt.day, dt.hour)
# hour2goose[hour] = row["goose articles"]
for hour in hours:
print hour
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
num_raw_si = 0
for chunk in raw_chunks:
fname = os.path.split(chunk)[1]
#num_raw_si += int(fname.split("-")[1])
with sc.Chunk(path=chunk, mode="rb", message=corpus.sc_msg()) as c:
for si in c:
num_raw_si += 1
#num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
data.append({
"event": event.query_id + " (filtered)",
"title": event.title,
"hour": hour,
"raw articles": num_raw_si,
"goose articles": hour2goose[hour],
"deduped articles": hour2ded[hour],
"deduped match articles": hour2ded_fltr[hour],
})
df = pd.DataFrame(data)
cols = ["raw articles", "goose articles", "deduped articles",
"deduped match articles"]
df_sum = df.groupby("event")[cols].sum()
df_sum["raw articles"] = df_sum["raw articles"].apply(format_int)
df_sum["goose articles"] = df_sum["goose articles"].apply(format_int)
df_sum["deduped articles"] = df_sum["deduped articles"].apply(format_int)
df_sum["deduped match articles"] = df_sum["deduped match articles"].apply(format_int)
print df_sum
print
coverage = []
for event in cuttsum.events.get_events():
if event.query_num < 26: continue
isect = event2ids[event.fs_name()].intersection(fltr_event2ids[event.fs_name()])
n_isect = len(isect)
n_unfltr = max(len(event2ids[event.fs_name()]), 1)
n_fltr = max(len(fltr_event2ids[event.fs_name()]), 1)
print event.fs_name()
print n_isect, float(n_isect) / n_fltr, float(n_isect) / n_unfltr
coverage.append({
"event": event.query_id,
"intersection": n_isect,
"isect/n_2015F": float(n_isect) / n_fltr,
"isect/n_2014": float(n_isect) / n_unfltr,
})
df = pd.DataFrame(coverage)
df_u = df.mean()
df_u["event"] = "mean"
print pd.concat([df, df_u.to_frame().T]).set_index("event")
exit()
with open("article_count.tex", "w") as f:
f.write(df_sum.to_latex())
import os
if not os.path.exists("plots"):
os.makedirs("plots")
import cuttsum.judgements
ndf = cuttsum.judgements.get_merged_dataframe()
for (event, title), group in df.groupby(["event", "title"]):
matches = ndf[ndf["query id"] == event]
#fig = plt.figure()
group = group.set_index(["hour"])
#ax = group[["goose articles", "deduped articles", "deduped match articles"]].plot()
linex = epoch(group.index[10])
ax = plt.plot(group.index, group["goose articles"], label="goose")
ax = plt.plot(group.index, group["deduped articles"], label="dedupe")
ax = plt.plot(group.index, group["deduped match articles"], label="dedupe qmatch")
for nugget, ngroup in matches.groupby("nugget id"):
times = ngroup["update id"].apply(lambda x: datetime.utcfromtimestamp(int(x.split("-")[0])))
#ngroup = ngroup.sort("timestamp")
times.sort()
times = times.reset_index(drop=True)
if len(times) == 0: continue
plt.plot_date(
(times[0], times[0]),
(0, plt.ylim()[1]),
'--', color="black", linewidth=.5, alpha=.5)
plt.gcf().autofmt_xdate()
plt.gcf().suptitle(title)
plt.gcf().savefig(os.path.join("plots", "{}-stream.png".format(event)))
plt.close("all")
| kedz/cuttsum | trec2015/sbin/reports/raw-stream-count.py | Python | apache-2.0 | 7,738 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.networks_client = cls.os.networks_client
cls.subnets_client = cls.os.subnets_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersTestJSON, cls).resource_setup()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
cls.name = data_utils.rand_name(cls.__name__ + '-server')
cls.password = data_utils.rand_password()
disk_config = cls.disk_config
cls.server_initial = cls.create_test_server(
validatable=True,
wait_until='ACTIVE',
name=cls.name,
metadata=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
disk_config=disk_config,
adminPass=cls.password)
cls.server = (cls.client.show_server(cls.server_initial['id'])
['server'])
def _create_net_subnet_ret_net_from_cidr(self, cidr):
name_net = data_utils.rand_name(self.__class__.__name__)
net = self.networks_client.create_network(name=name_net)
self.addCleanup(self.networks_client.delete_network,
net['network']['id'])
subnet = self.subnets_client.create_subnet(
network_id=net['network']['id'],
cidr=cidr,
ip_version=4)
self.addCleanup(self.subnets_client.delete_subnet,
subnet['subnet']['id'])
return net
@test.attr(type='smoke')
@test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
# NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
# Here we compare directly with the canonicalized format.
self.assertEqual(self.server['accessIPv6'],
str(netaddr.IPAddress(self.accessIPv6)))
self.assertEqual(self.name, self.server['name'])
self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
self.assertEqual(self.meta, self.server['metadata'])
@test.attr(type='smoke')
@test.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
def test_list_servers(self):
# The created server should be in the list of all servers
body = self.client.list_servers()
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
@test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
def test_list_servers_with_detail(self):
# The created server should be in the detailed list of all servers
body = self.client.list_servers(detail=True)
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
@test.idempotent_id('cbc0f52f-05aa-492b-bdc1-84b575ca294b')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_verify_created_server_vcpus(self):
# Verify that the number of vcpus reported by the instance matches
# the amount stated by the flavor
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
@test.idempotent_id('ac1ad47f-984b-4441-9274-c9079b7a0666')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_host_name_is_same_as_server_name(self):
# Verify the instance host name is the same as the server name
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
hostname = linux_client.get_hostname()
msg = ('Failed while verifying servername equals hostname. Expected '
'hostname "%s" but got "%s".' % (self.name, hostname))
self.assertEqual(self.name.lower(), hostname, msg)
@test.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
@testtools.skipUnless(
test.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
'ServerGroupAffinityFilter is not available.')
def test_create_server_with_scheduler_hint_group(self):
# Create a server with the scheduler hint "group".
group_id = self.create_test_server_group()['id']
hints = {'group': group_id}
server = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')
# Check a server is in the group
server_group = (self.server_groups_client.show_server_group(group_id)
['server_group'])
self.assertIn(server['id'], server_group['members'])
@test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
@testtools.skipUnless(CONF.service_available.neutron,
'Neutron service must be available.')
def test_verify_multiple_nics_order(self):
# Verify that the networks order given at the server creation is
# preserved within the server.
net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']}]
server_multi_nics = self.create_test_server(
networks=networks, wait_until='ACTIVE')
# Cleanup server; this is needed in the test case because with the LIFO
# nature of the cleanups, if we don't delete the server first, the port
# will still be part of the subnet and we'll get a 409 from Neutron
# when trying to delete the subnet. The tear down in the base class
# will try to delete the server and get a 404 but it's ignored so
# we're OK.
def cleanup_server():
self.client.delete_server(server_multi_nics['id'])
waiters.wait_for_server_termination(self.client,
server_multi_nics['id'])
self.addCleanup(cleanup_server)
addresses = (self.client.list_addresses(server_multi_nics['id'])
['addresses'])
# We can't predict the ip addresses assigned to the server on networks.
# Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
# other times ['19.80.0.3', '19.86.0.3']. So we check if the first
# address is in first network, similarly second address is in second
# network.
addr = [addresses[net1['network']['name']][0]['addr'],
addresses[net2['network']['name']][0]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
@test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
@testtools.skipUnless(CONF.service_available.neutron,
'Neutron service must be available.')
def test_verify_duplicate_network_nics(self):
# Verify that server creation does not fail when more than one nic
# is created on the same network.
net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']},
{'uuid': net1['network']['id']}]
server_multi_nics = self.create_test_server(
networks=networks, wait_until='ACTIVE')
def cleanup_server():
self.client.delete_server(server_multi_nics['id'])
waiters.wait_for_server_termination(self.client,
server_multi_nics['id'])
self.addCleanup(cleanup_server)
addresses = (self.client.list_addresses(server_multi_nics['id'])
['addresses'])
addr = [addresses[net1['network']['name']][0]['addr'],
addresses[net2['network']['name']][0]['addr'],
addresses[net1['network']['name']][1]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24'),
netaddr.IPNetwork('19.80.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
cls.flavor_client = cls.os_adm.flavors_client
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
@test.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
flavor_base = self.flavors_client.show_flavor(
self.flavor_ref)['flavor']
def create_flavor_with_ephemeral(ephem_disk):
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = flavor_base['ram']
vcpus = flavor_base['vcpus']
disk = flavor_base['disk']
if ephem_disk > 0:
# Create a flavor with ephemeral disk
flavor_name = data_utils.rand_name('eph_flavor')
flavor = self.flavor_client.create_flavor(
name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
id=flavor_with_eph_disk_id, ephemeral=ephem_disk)['flavor']
else:
# Create a flavor without ephemeral disk
flavor_name = data_utils.rand_name('no_eph_flavor')
flavor = self.flavor_client.create_flavor(
name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
id=flavor_with_eph_disk_id)['flavor']
self.addCleanup(flavor_clean_up, flavor['id'])
return flavor['id']
def flavor_clean_up(flavor_id):
self.flavor_client.delete_flavor(flavor_id)
self.flavor_client.wait_for_resource_deletion(flavor_id)
flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
flavor_no_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=0)
admin_pass = self.image_ssh_password
server_no_eph_disk = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id)
# Get partition number of server without ephemeral disk.
server_no_eph_disk = self.client.show_server(
server_no_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server_no_eph_disk),
self.ssh_user,
admin_pass,
self.validation_resources['keypair']['private_key'],
server=server_no_eph_disk,
servers_client=self.client)
partition_num = len(linux_client.get_partitions().split('\n'))
# Explicit server deletion necessary for Juno compatibility
self.client.delete_server(server_no_eph_disk['id'])
server_with_eph_disk = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id)
server_with_eph_disk = self.client.show_server(
server_with_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server_with_eph_disk),
self.ssh_user,
admin_pass,
self.validation_resources['keypair']['private_key'],
server=server_with_eph_disk,
servers_client=self.client)
partition_num_emph = len(linux_client.get_partitions().split('\n'))
self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
disk_config = 'MANUAL'
@classmethod
def skip_checks(cls):
super(ServersTestManualDisk, cls).skip_checks()
if not CONF.compute_feature_enabled.disk_config:
msg = "DiskConfig extension not enabled."
raise cls.skipException(msg)
| Tesora/tesora-tempest | tempest/api/compute/servers/test_create_server.py | Python | apache-2.0 | 14,903 |
# Copyright 2019 Verily Life Sciences LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hparams_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from classifaedes import hparams_lib
import tensorflow.compat.v1 as tf
class HparamsLibTest(tf.test.TestCase):
def testIndentedSerialize(self):
"""Tests that our slightly customized serialization can be parsed.
hparams_lib._human_serialize() uses indented JSON to improve readability.
"""
hps1 = hparams_lib.defaults()
serialized = hparams_lib._human_serialize(hps1)
hps2 = hparams_lib.defaults()
hps2.parse_json(serialized)
self.assertDictEqual(hps1.values(), hps2.values())
if __name__ == '__main__':
tf.test.main()
| verilylifesciences/classifaedes | classifaedes/hparams_lib_test.py | Python | apache-2.0 | 1,292 |
# -*- coding: utf-8 -*-
#
# portal.py # # # # # # # # # #
# #
# Copyright 2016 Giorgio Ladu <giorgio.ladu >at< gmail.com> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # #
import config
print 'Status: 302 Found'
print 'Location: http://' + config.custom_url
print ''
| giorgioladu/Rapa | portal.py | Python | apache-2.0 | 987 |
from paypalrestsdk import BillingAgreement, ResourceNotFound
import logging
BILLING_AGREEMENT_ID = "I-HT38K76XPMGJ"
try:
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
print("Billing Agreement [%s] has state %s" % (billing_agreement.id, billing_agreement.state))
suspend_note = {
"note": "Suspending the agreement"
}
if billing_agreement.suspend(suspend_note):
# Would expect state has changed to Suspended
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
print("Billing Agreement [%s] has state %s" % (billing_agreement.id, billing_agreement.state))
reactivate_note = {
"note": "Reactivating the agreement"
}
if billing_agreement.reactivate(reactivate_note):
# Would expect state has changed to Active
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
print("Billing Agreement [%s] has state %s" % (billing_agreement.id, billing_agreement.state))
else:
print(billing_agreement.error)
else:
print(billing_agreement.error)
except ResourceNotFound as error:
print("Billing Agreement Not Found")
| stafur/pyTRUST | paypal-rest-api-sdk-python/samples/subscription/billing_agreements/suspend_and_re_activate.py | Python | apache-2.0 | 1,184 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the utility functions used by the placement API."""
import fixtures
from oslo_middleware import request_id
import webob
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import util
from nova import objects
from nova import test
from nova.tests import uuidsentinel
class TestCheckAccept(test.NoDBTestCase):
"""Confirm behavior of util.check_accept."""
@staticmethod
@util.check_accept('application/json', 'application/vnd.openstack')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/plain'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_fail_complex_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/html;q=0.9,text/plain,application/vnd.aws;q=0.8'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_success_no_accept(self):
req = webob.Request.blank('/')
self.assertTrue(self.handler(req))
def test_success_simple_match(self):
req = webob.Request.blank('/')
req.accept = 'application/json'
self.assertTrue(self.handler(req))
def test_success_complex_any_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
self.assertTrue(self.handler(req))
def test_success_complex_lower_quality_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xml;q=0.9,application/vnd.openstack;q=0.8'
self.assertTrue(self.handler(req))
class TestExtractJSON(test.NoDBTestCase):
# Although the intent of this test class is not to test that
# schemas work, we may as well use a real one to ensure that
# behaviors are what we expect.
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"uuid": {"type": "string", "format": "uuid"}
},
"required": ["name"],
"additionalProperties": False
}
def test_not_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'I am a string',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_malformed_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"my bytes got left behind":}',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_schema_mismatch(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"a": "b"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_type_invalid(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": 1}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_format_checker(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "uuid": "not a uuid"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
    def test_no_additional_properties(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "cow": "moo"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_valid(self):
data = util.extract_json(
'{"name": "cow", '
'"uuid": "%s"}' % uuidsentinel.rp_uuid,
self.schema)
self.assertEqual('cow', data['name'])
self.assertEqual(uuidsentinel.rp_uuid, data['uuid'])
class TestJSONErrorFormatter(test.NoDBTestCase):
def setUp(self):
super(TestJSONErrorFormatter, self).setUp()
self.environ = {}
# TODO(jaypipes): Remove this when we get more than a single version
# in the placement API. The fact that we only had a single version was
# masking a bug in the utils code.
_versions = [
'1.0',
'1.1',
]
mod_str = 'nova.api.openstack.placement.microversion.VERSIONS'
self.useFixture(fixtures.MonkeyPatch(mod_str, _versions))
def test_status_to_int_code(self):
body = ''
status = '404 Not Found'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(404, result['errors'][0]['status'])
def test_strip_body_tags(self):
body = '<h1>Big Error!</h1>'
status = '400 Bad Request'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('Big Error!', result['errors'][0]['detail'])
def test_request_id_presence(self):
body = ''
status = '400 Bad Request'
title = ''
# no request id in environ, none in error
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('request_id', result['errors'][0])
# request id in environ, request id in error
self.environ[request_id.ENV_REQUEST_ID] = 'stub-id'
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('stub-id', result['errors'][0]['request_id'])
def test_microversion_406_handling(self):
body = ''
status = '400 Bad Request'
title = ''
# Not a 406, no version info required.
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# A 406 but not because of microversions (microversion
# parsing was successful), no version info
# required.
status = '406 Not Acceptable'
version_obj = microversion.parse_version_string('2.3')
self.environ[microversion.MICROVERSION_ENVIRON] = version_obj
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# Microversion parsing failed, status is 406, send version info.
del self.environ[microversion.MICROVERSION_ENVIRON]
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(microversion.max_version_string(),
result['errors'][0]['max_version'])
self.assertEqual(microversion.min_version_string(),
result['errors'][0]['min_version'])
class TestRequireContent(test.NoDBTestCase):
"""Confirm behavior of util.require_accept."""
@staticmethod
@util.require_content('application/json')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_content_type(self):
req = webob.Request.blank('/')
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type None is not supported, use application/json',
str(error))
def test_fail_wrong_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'text/plain'
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type text/plain is not supported, use application/json',
str(error))
def test_success_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'application/json'
self.assertTrue(self.handler(req))
class TestPlacementURLs(test.NoDBTestCase):
def setUp(self):
super(TestPlacementURLs, self).setUp()
self.resource_provider = objects.ResourceProvider(
name=uuidsentinel.rp_name,
uuid=uuidsentinel.rp_uuid)
def test_resource_provider_url(self):
environ = {}
expected_url = '/resource_providers/%s' % uuidsentinel.rp_uuid
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_resource_provider_url_prefix(self):
# SCRIPT_NAME represents the mount point of a WSGI
# application when it is hosted at a path/prefix.
environ = {'SCRIPT_NAME': '/placement'}
expected_url = ('/placement/resource_providers/%s'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_inventories_url(self):
environ = {}
expected_url = ('/resource_providers/%s/inventories'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider))
def test_inventory_url(self):
resource_class = 'DISK_GB'
environ = {}
expected_url = ('/resource_providers/%s/inventories/%s'
% (uuidsentinel.rp_uuid, resource_class))
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider, resource_class))
| sebrandon1/nova | nova/tests/unit/api/openstack/placement/test_util.py | Python | apache-2.0 | 10,993 |
from .test_antivirus import AbstractTests
import modules.antivirus.avg.avg as module
import modules.antivirus.base as base
from mock import patch
from pathlib import Path
class TestAvg(AbstractTests.TestAntivirus):
name = "AVG AntiVirus Free (Linux)"
scan_path = Path("/usr/bin/avgscan")
scan_args = ('--heur', '--paranoid', '--arc', '--macrow', '--pwdw',
'--pup')
module = module.AVGAntiVirusFree
scan_clean_stdout = """AVG command line Anti-Virus scanner
Copyright (c) 2013 AVG Technologies CZ
Virus database version: 4793/15678
Virus database release date: Mon, 21 May 2018 13:00:00 +0000
Files scanned : 1(1)
Infections found : 0(0)
PUPs found : 0
Files healed : 0
Warnings reported : 0
Errors reported : 0
"""
scan_virus_retcode = 4
virusname = "EICAR_Test"
scan_virus_stdout = """AVG command line Anti-Virus scanner
Copyright (c) 2013 AVG Technologies CZ
Virus database version: 4793/15678
Virus database release date: Mon, 21 May 2018 13:00:00 +0000
eicar.com.txt Virus identified EICAR_Test
Files scanned : 1(1)
Infections found : 1(1)
PUPs found : 0
Files healed : 0
Warnings reported : 0
Errors reported : 0
"""
version = "13.0.3118"
virus_database_version = "4793/15678 (21 May 2018)"
version_stdout = """AVG command line controller
Copyright (c) 2013 AVG Technologies CZ
------ AVG status ------
AVG version : 13.0.3118
Components version : Aspam:3111, Cfg:3109, Cli:3115, Common:3110, Core:4793, Doc:3115, Ems:3111, Initd:3113, Lng:3112, Oad:3118, Other:3109, Scan:3115, Sched:3110, Update:3109
Last update : Tue, 22 May 2018 07:52:31 +0000
------ License status ------
License number : LUOTY-674PL-VRWOV-APYEG-ZXHMA-E
License version : 10
License type : FREE
License expires on :
Registered user :
Registered company :
------ WD status ------
Component State Restarts UpTime
Avid running 0 13 minute(s)
Oad running 0 13 minute(s)
Sched running 0 13 minute(s)
Tcpd running 0 13 minute(s)
Update stopped 0 -
------ Sched status ------
Task name Next runtime Last runtime
Virus update Tue, 22 May 2018 18:04:00 +0000 Tue, 22 May 2018 07:46:29 +0000
Program update - -
User counting Wed, 23 May 2018 07:46:29 +0000 Tue, 22 May 2018 07:46:29 +0000
------ Tcpd status ------
E-mails checked : 0
SPAM messages : 0
Phishing messages : 0
E-mails infected : 0
E-mails dropped : 0
------ Avid status ------
Virus database reload times : 0
Virus database version : 4793/15678
Virus database release date : Mon, 21 May 2018 13:00:00 +0000
Virus database shared in memory : yes
------ Oad status ------
Files scanned : 0(0)
Infections found : 0(0)
PUPs found : 0
Files healed : 0
Warnings reported : 0
Errors reported : 0
Operation successful.
""" # nopep8
@patch.object(base.AntivirusUnix, "locate")
@patch.object(base.AntivirusUnix, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def setUp(self, m_run_cmd, m_locate_one, m_locate):
m_run_cmd.return_value = 0, self.version_stdout, ""
m_locate_one.return_value = self.scan_path
m_locate.return_value = self.database
super().setUp()
@patch.object(module, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def test_get_virus_db_error(self, m_run_cmd, m_locate_one):
m_locate_one.return_value = self.scan_path
m_run_cmd.return_value = -1, self.version_stdout, ""
with self.assertRaises(RuntimeError):
self.plugin.get_virus_database_version()
@patch.object(module, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def test_get_virus_db_no_version(self, m_run_cmd, m_locate_one):
m_locate_one.return_value = self.scan_path
wrong_stdout = "LOREM IPSUM"
m_run_cmd.return_value = 0, wrong_stdout, ""
with self.assertRaises(RuntimeError):
self.plugin.get_virus_database_version()
@patch.object(module, "locate_one")
@patch.object(base.AntivirusUnix, "run_cmd")
def test_get_virus_db_no_release(self, m_run_cmd, m_locate_one):
m_locate_one.return_value = self.scan_path
wrong_stdout = "Virus database version : 4793/15678"
m_run_cmd.return_value = 0, wrong_stdout, ""
version = self.plugin.get_virus_database_version()
self.assertEquals(version, "4793/15678")
| quarkslab/irma | probe/tests/modules/antivirus/test_avg.py | Python | apache-2.0 | 4,618 |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestQuery(unittest.TestCase):
_PROJECT = 'PROJECT'
@staticmethod
def _get_target_class():
from google.cloud.datastore.query import Query
return Query
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _make_client(self):
return _Client(self._PROJECT)
def test_ctor_defaults(self):
client = self._make_client()
query = self._make_one(client)
self.assertIs(query._client, client)
self.assertEqual(query.project, client.project)
self.assertIsNone(query.kind)
self.assertEqual(query.namespace, client.namespace)
self.assertIsNone(query.ancestor)
self.assertEqual(query.filters, [])
self.assertEqual(query.projection, [])
self.assertEqual(query.order, [])
self.assertEqual(query.distinct_on, [])
def test_ctor_explicit(self):
from google.cloud.datastore.key import Key
_PROJECT = 'OTHER_PROJECT'
_KIND = 'KIND'
_NAMESPACE = 'OTHER_NAMESPACE'
client = self._make_client()
ancestor = Key('ANCESTOR', 123, project=_PROJECT)
FILTERS = [('foo', '=', 'Qux'), ('bar', '<', 17)]
PROJECTION = ['foo', 'bar', 'baz']
ORDER = ['foo', 'bar']
DISTINCT_ON = ['foo']
query = self._make_one(
client,
kind=_KIND,
project=_PROJECT,
namespace=_NAMESPACE,
ancestor=ancestor,
filters=FILTERS,
projection=PROJECTION,
order=ORDER,
distinct_on=DISTINCT_ON,
)
self.assertIs(query._client, client)
self.assertEqual(query.project, _PROJECT)
self.assertEqual(query.kind, _KIND)
self.assertEqual(query.namespace, _NAMESPACE)
self.assertEqual(query.ancestor.path, ancestor.path)
self.assertEqual(query.filters, FILTERS)
self.assertEqual(query.projection, PROJECTION)
self.assertEqual(query.order, ORDER)
self.assertEqual(query.distinct_on, DISTINCT_ON)
def test_ctor_bad_projection(self):
BAD_PROJECTION = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
projection=BAD_PROJECTION)
def test_ctor_bad_order(self):
BAD_ORDER = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
order=BAD_ORDER)
def test_ctor_bad_distinct_on(self):
BAD_DISTINCT_ON = object()
self.assertRaises(TypeError, self._make_one, self._make_client(),
distinct_on=BAD_DISTINCT_ON)
def test_ctor_bad_filters(self):
FILTERS_CANT_UNPACK = [('one', 'two')]
self.assertRaises(ValueError, self._make_one, self._make_client(),
filters=FILTERS_CANT_UNPACK)
def test_namespace_setter_w_non_string(self):
query = self._make_one(self._make_client())
def _assign(val):
query.namespace = val
self.assertRaises(ValueError, _assign, object())
def test_namespace_setter(self):
_NAMESPACE = 'OTHER_NAMESPACE'
query = self._make_one(self._make_client())
query.namespace = _NAMESPACE
self.assertEqual(query.namespace, _NAMESPACE)
def test_kind_setter_w_non_string(self):
query = self._make_one(self._make_client())
def _assign(val):
query.kind = val
self.assertRaises(TypeError, _assign, object())
def test_kind_setter_wo_existing(self):
_KIND = 'KIND'
query = self._make_one(self._make_client())
query.kind = _KIND
self.assertEqual(query.kind, _KIND)
def test_kind_setter_w_existing(self):
_KIND_BEFORE = 'KIND_BEFORE'
_KIND_AFTER = 'KIND_AFTER'
query = self._make_one(self._make_client(), kind=_KIND_BEFORE)
self.assertEqual(query.kind, _KIND_BEFORE)
query.kind = _KIND_AFTER
self.assertEqual(query.project, self._PROJECT)
self.assertEqual(query.kind, _KIND_AFTER)
def test_ancestor_setter_w_non_key(self):
query = self._make_one(self._make_client())
def _assign(val):
query.ancestor = val
self.assertRaises(TypeError, _assign, object())
self.assertRaises(TypeError, _assign, ['KIND', 'NAME'])
def test_ancestor_setter_w_key(self):
from google.cloud.datastore.key import Key
_NAME = u'NAME'
key = Key('KIND', 123, project=self._PROJECT)
query = self._make_one(self._make_client())
query.add_filter('name', '=', _NAME)
query.ancestor = key
self.assertEqual(query.ancestor.path, key.path)
def test_ancestor_deleter_w_key(self):
from google.cloud.datastore.key import Key
key = Key('KIND', 123, project=self._PROJECT)
query = self._make_one(client=self._make_client(), ancestor=key)
del query.ancestor
self.assertIsNone(query.ancestor)
def test_add_filter_setter_w_unknown_operator(self):
query = self._make_one(self._make_client())
self.assertRaises(ValueError, query.add_filter,
'firstname', '~~', 'John')
def test_add_filter_w_known_operator(self):
query = self._make_one(self._make_client())
query.add_filter('firstname', '=', u'John')
self.assertEqual(query.filters, [('firstname', '=', u'John')])
def test_add_filter_w_all_operators(self):
query = self._make_one(self._make_client())
query.add_filter('leq_prop', '<=', u'val1')
query.add_filter('geq_prop', '>=', u'val2')
query.add_filter('lt_prop', '<', u'val3')
query.add_filter('gt_prop', '>', u'val4')
query.add_filter('eq_prop', '=', u'val5')
self.assertEqual(len(query.filters), 5)
self.assertEqual(query.filters[0], ('leq_prop', '<=', u'val1'))
self.assertEqual(query.filters[1], ('geq_prop', '>=', u'val2'))
self.assertEqual(query.filters[2], ('lt_prop', '<', u'val3'))
self.assertEqual(query.filters[3], ('gt_prop', '>', u'val4'))
self.assertEqual(query.filters[4], ('eq_prop', '=', u'val5'))
def test_add_filter_w_known_operator_and_entity(self):
from google.cloud.datastore.entity import Entity
query = self._make_one(self._make_client())
other = Entity()
other['firstname'] = u'John'
other['lastname'] = u'Smith'
query.add_filter('other', '=', other)
self.assertEqual(query.filters, [('other', '=', other)])
def test_add_filter_w_whitespace_property_name(self):
query = self._make_one(self._make_client())
PROPERTY_NAME = ' property with lots of space '
query.add_filter(PROPERTY_NAME, '=', u'John')
self.assertEqual(query.filters, [(PROPERTY_NAME, '=', u'John')])
def test_add_filter___key__valid_key(self):
from google.cloud.datastore.key import Key
query = self._make_one(self._make_client())
key = Key('Foo', project=self._PROJECT)
query.add_filter('__key__', '=', key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_filter___key__not_equal_operator(self):
from google.cloud.datastore.key import Key
key = Key('Foo', project=self._PROJECT)
query = self._make_one(self._make_client())
query.add_filter('__key__', '<', key)
self.assertEqual(query.filters, [('__key__', '<', key)])
def test_filter___key__invalid_value(self):
query = self._make_one(self._make_client())
self.assertRaises(ValueError, query.add_filter, '__key__', '=', None)
def test_projection_setter_empty(self):
query = self._make_one(self._make_client())
query.projection = []
self.assertEqual(query.projection, [])
def test_projection_setter_string(self):
query = self._make_one(self._make_client())
query.projection = 'field1'
self.assertEqual(query.projection, ['field1'])
def test_projection_setter_non_empty(self):
query = self._make_one(self._make_client())
query.projection = ['field1', 'field2']
self.assertEqual(query.projection, ['field1', 'field2'])
def test_projection_setter_multiple_calls(self):
_PROJECTION1 = ['field1', 'field2']
_PROJECTION2 = ['field3']
query = self._make_one(self._make_client())
query.projection = _PROJECTION1
self.assertEqual(query.projection, _PROJECTION1)
query.projection = _PROJECTION2
self.assertEqual(query.projection, _PROJECTION2)
def test_keys_only(self):
query = self._make_one(self._make_client())
query.keys_only()
self.assertEqual(query.projection, ['__key__'])
def test_key_filter_defaults(self):
from google.cloud.datastore.key import Key
client = self._make_client()
query = self._make_one(client)
self.assertEqual(query.filters, [])
key = Key('Kind', 1234, project='project')
query.key_filter(key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_key_filter_explicit(self):
from google.cloud.datastore.key import Key
client = self._make_client()
query = self._make_one(client)
self.assertEqual(query.filters, [])
key = Key('Kind', 1234, project='project')
query.key_filter(key, operator='>')
self.assertEqual(query.filters, [('__key__', '>', key)])
def test_order_setter_empty(self):
query = self._make_one(self._make_client(), order=['foo', '-bar'])
query.order = []
self.assertEqual(query.order, [])
def test_order_setter_string(self):
query = self._make_one(self._make_client())
query.order = 'field'
self.assertEqual(query.order, ['field'])
def test_order_setter_single_item_list_desc(self):
query = self._make_one(self._make_client())
query.order = ['-field']
self.assertEqual(query.order, ['-field'])
def test_order_setter_multiple(self):
query = self._make_one(self._make_client())
query.order = ['foo', '-bar']
self.assertEqual(query.order, ['foo', '-bar'])
def test_distinct_on_setter_empty(self):
query = self._make_one(self._make_client(), distinct_on=['foo', 'bar'])
query.distinct_on = []
self.assertEqual(query.distinct_on, [])
def test_distinct_on_setter_string(self):
query = self._make_one(self._make_client())
query.distinct_on = 'field1'
self.assertEqual(query.distinct_on, ['field1'])
def test_distinct_on_setter_non_empty(self):
query = self._make_one(self._make_client())
query.distinct_on = ['field1', 'field2']
self.assertEqual(query.distinct_on, ['field1', 'field2'])
def test_distinct_on_multiple_calls(self):
_DISTINCT_ON1 = ['field1', 'field2']
_DISTINCT_ON2 = ['field3']
query = self._make_one(self._make_client())
query.distinct_on = _DISTINCT_ON1
self.assertEqual(query.distinct_on, _DISTINCT_ON1)
query.distinct_on = _DISTINCT_ON2
self.assertEqual(query.distinct_on, _DISTINCT_ON2)
def test_fetch_defaults_w_client_attr(self):
from google.cloud.datastore.query import Iterator
client = self._make_client()
query = self._make_one(client)
iterator = query.fetch()
self.assertIsInstance(iterator, Iterator)
self.assertIs(iterator._query, query)
self.assertIs(iterator.client, client)
self.assertIsNone(iterator.max_results)
self.assertEqual(iterator._offset, 0)
def test_fetch_w_explicit_client(self):
from google.cloud.datastore.query import Iterator
client = self._make_client()
other_client = self._make_client()
query = self._make_one(client)
iterator = query.fetch(limit=7, offset=8, client=other_client)
self.assertIsInstance(iterator, Iterator)
self.assertIs(iterator._query, query)
self.assertIs(iterator.client, other_client)
self.assertEqual(iterator.max_results, 7)
self.assertEqual(iterator._offset, 8)
class TestIterator(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore.query import Iterator
return Iterator
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
query = object()
client = object()
iterator = self._make_one(query, client)
self.assertFalse(iterator._started)
self.assertIs(iterator.client, client)
self.assertIsNotNone(iterator._item_to_value)
self.assertIsNone(iterator.max_results)
self.assertEqual(iterator.page_number, 0)
        self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.num_results, 0)
self.assertIs(iterator._query, query)
self.assertIsNone(iterator._offset)
self.assertIsNone(iterator._end_cursor)
self.assertTrue(iterator._more_results)
def test_constructor_explicit(self):
query = object()
client = object()
limit = 43
offset = 9
start_cursor = b'8290\xff'
end_cursor = b'so20rc\ta'
iterator = self._make_one(
query, client, limit=limit, offset=offset,
start_cursor=start_cursor, end_cursor=end_cursor)
self.assertFalse(iterator._started)
self.assertIs(iterator.client, client)
self.assertIsNotNone(iterator._item_to_value)
self.assertEqual(iterator.max_results, limit)
self.assertEqual(iterator.page_number, 0)
self.assertEqual(iterator.next_page_token, start_cursor)
self.assertEqual(iterator.num_results, 0)
self.assertIs(iterator._query, query)
self.assertEqual(iterator._offset, offset)
self.assertEqual(iterator._end_cursor, end_cursor)
self.assertTrue(iterator._more_results)
def test__build_protobuf_empty(self):
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.datastore.query import Query
client = _Client(None)
query = Query(client)
iterator = self._make_one(query, client)
pb = iterator._build_protobuf()
expected_pb = query_pb2.Query()
self.assertEqual(pb, expected_pb)
def test__build_protobuf_all_values(self):
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.datastore.query import Query
client = _Client(None)
query = Query(client)
limit = 15
offset = 9
start_bytes = b'i\xb7\x1d'
start_cursor = 'abcd'
end_bytes = b'\xc3\x1c\xb3'
end_cursor = 'wxyz'
iterator = self._make_one(
query, client, limit=limit, offset=offset,
start_cursor=start_cursor, end_cursor=end_cursor)
self.assertEqual(iterator.max_results, limit)
iterator.num_results = 4
iterator._skipped_results = 1
pb = iterator._build_protobuf()
expected_pb = query_pb2.Query(
start_cursor=start_bytes,
end_cursor=end_bytes,
offset=offset - iterator._skipped_results,
)
expected_pb.limit.value = limit - iterator.num_results
self.assertEqual(pb, expected_pb)
def test__process_query_results(self):
from google.cloud.proto.datastore.v1 import query_pb2
iterator = self._make_one(None, None,
end_cursor='abcd')
self.assertIsNotNone(iterator._end_cursor)
entity_pbs = [
_make_entity('Hello', 9998, 'PRAHJEKT'),
]
cursor_as_bytes = b'\x9ai\xe7'
cursor = b'mmnn'
skipped_results = 4
more_results_enum = query_pb2.QueryResultBatch.NOT_FINISHED
response_pb = _make_query_response(
entity_pbs, cursor_as_bytes, more_results_enum, skipped_results)
result = iterator._process_query_results(response_pb)
self.assertEqual(result, entity_pbs)
self.assertEqual(iterator._skipped_results, skipped_results)
self.assertEqual(iterator.next_page_token, cursor)
self.assertTrue(iterator._more_results)
def test__process_query_results_done(self):
from google.cloud.proto.datastore.v1 import query_pb2
iterator = self._make_one(None, None,
end_cursor='abcd')
self.assertIsNotNone(iterator._end_cursor)
entity_pbs = [
_make_entity('World', 1234, 'PROJECT'),
]
cursor_as_bytes = b''
skipped_results = 44
more_results_enum = query_pb2.QueryResultBatch.NO_MORE_RESULTS
response_pb = _make_query_response(
entity_pbs, cursor_as_bytes, more_results_enum, skipped_results)
result = iterator._process_query_results(response_pb)
self.assertEqual(result, entity_pbs)
self.assertEqual(iterator._skipped_results, skipped_results)
self.assertIsNone(iterator.next_page_token)
self.assertFalse(iterator._more_results)
def test__process_query_results_bad_enum(self):
iterator = self._make_one(None, None)
more_results_enum = 999
response_pb = _make_query_response(
[], b'', more_results_enum, 0)
with self.assertRaises(ValueError):
iterator._process_query_results(response_pb)
def _next_page_helper(self, txn_id=None):
from google.cloud.iterator import Page
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.datastore.query import Query
more_enum = query_pb2.QueryResultBatch.NOT_FINISHED
result = _make_query_response([], b'', more_enum, 0)
project = 'prujekt'
ds_api = _make_datastore_api(result)
if txn_id is None:
client = _Client(project, datastore_api=ds_api)
else:
transaction = mock.Mock(id=txn_id, spec=['id'])
client = _Client(
project, datastore_api=ds_api, transaction=transaction)
query = Query(client)
iterator = self._make_one(query, client)
page = iterator._next_page()
self.assertIsInstance(page, Page)
self.assertIs(page._parent, iterator)
partition_id = entity_pb2.PartitionId(project_id=project)
if txn_id is None:
read_options = datastore_pb2.ReadOptions()
else:
read_options = datastore_pb2.ReadOptions(transaction=txn_id)
empty_query = query_pb2.Query()
ds_api.run_query.assert_called_once_with(
project, partition_id, read_options, query=empty_query)
def test__next_page(self):
self._next_page_helper()
def test__next_page_in_transaction(self):
txn_id = b'1xo1md\xe2\x98\x83'
self._next_page_helper(txn_id)
def test__next_page_no_more(self):
from google.cloud.datastore.query import Query
ds_api = _make_datastore_api()
client = _Client(None, datastore_api=ds_api)
query = Query(client)
iterator = self._make_one(query, client)
iterator._more_results = False
page = iterator._next_page()
self.assertIsNone(page)
ds_api.run_query.assert_not_called()
class Test__item_to_entity(unittest.TestCase):
def _call_fut(self, iterator, entity_pb):
from google.cloud.datastore.query import _item_to_entity
return _item_to_entity(iterator, entity_pb)
def test_it(self):
entity_pb = mock.sentinel.entity_pb
patch = mock.patch(
'google.cloud.datastore.helpers.entity_from_protobuf')
with patch as entity_from_protobuf:
result = self._call_fut(None, entity_pb)
self.assertIs(result, entity_from_protobuf.return_value)
entity_from_protobuf.assert_called_once_with(entity_pb)
class Test__pb_from_query(unittest.TestCase):
def _call_fut(self, query):
from google.cloud.datastore.query import _pb_from_query
return _pb_from_query(query)
def test_empty(self):
from google.cloud.proto.datastore.v1 import query_pb2
pb = self._call_fut(_Query())
self.assertEqual(list(pb.projection), [])
self.assertEqual(list(pb.kind), [])
self.assertEqual(list(pb.order), [])
self.assertEqual(list(pb.distinct_on), [])
self.assertEqual(pb.filter.property_filter.property.name, '')
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op,
query_pb2.CompositeFilter.OPERATOR_UNSPECIFIED)
self.assertEqual(list(cfilter.filters), [])
self.assertEqual(pb.start_cursor, b'')
self.assertEqual(pb.end_cursor, b'')
self.assertEqual(pb.limit.value, 0)
self.assertEqual(pb.offset, 0)
def test_projection(self):
pb = self._call_fut(_Query(projection=['a', 'b', 'c']))
self.assertEqual([item.property.name for item in pb.projection],
['a', 'b', 'c'])
def test_kind(self):
pb = self._call_fut(_Query(kind='KIND'))
self.assertEqual([item.name for item in pb.kind], ['KIND'])
def test_ancestor(self):
from google.cloud.datastore.key import Key
from google.cloud.proto.datastore.v1 import query_pb2
ancestor = Key('Ancestor', 123, project='PROJECT')
pb = self._call_fut(_Query(ancestor=ancestor))
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
self.assertEqual(len(cfilter.filters), 1)
pfilter = cfilter.filters[0].property_filter
self.assertEqual(pfilter.property.name, '__key__')
ancestor_pb = ancestor.to_protobuf()
self.assertEqual(pfilter.value.key_value, ancestor_pb)
def test_filter(self):
from google.cloud.proto.datastore.v1 import query_pb2
query = _Query(filters=[('name', '=', u'John')])
query.OPERATORS = {
'=': query_pb2.PropertyFilter.EQUAL,
}
pb = self._call_fut(query)
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
self.assertEqual(len(cfilter.filters), 1)
pfilter = cfilter.filters[0].property_filter
self.assertEqual(pfilter.property.name, 'name')
self.assertEqual(pfilter.value.string_value, u'John')
def test_filter_key(self):
from google.cloud.datastore.key import Key
from google.cloud.proto.datastore.v1 import query_pb2
key = Key('Kind', 123, project='PROJECT')
query = _Query(filters=[('__key__', '=', key)])
query.OPERATORS = {
'=': query_pb2.PropertyFilter.EQUAL,
}
pb = self._call_fut(query)
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
self.assertEqual(len(cfilter.filters), 1)
pfilter = cfilter.filters[0].property_filter
self.assertEqual(pfilter.property.name, '__key__')
key_pb = key.to_protobuf()
self.assertEqual(pfilter.value.key_value, key_pb)
def test_order(self):
from google.cloud.proto.datastore.v1 import query_pb2
pb = self._call_fut(_Query(order=['a', '-b', 'c']))
self.assertEqual([item.property.name for item in pb.order],
['a', 'b', 'c'])
self.assertEqual([item.direction for item in pb.order],
[query_pb2.PropertyOrder.ASCENDING,
query_pb2.PropertyOrder.DESCENDING,
query_pb2.PropertyOrder.ASCENDING])
def test_distinct_on(self):
pb = self._call_fut(_Query(distinct_on=['a', 'b', 'c']))
self.assertEqual([item.name for item in pb.distinct_on],
['a', 'b', 'c'])
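# Lightweight stand-in for google.cloud.datastore.query.Query used by the
# _pb_from_query tests; it only carries the attributes the converter reads.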
class _Query(object):
def __init__(self,
client=object(),
kind=None,
project=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
distinct_on=()):
self._client = client
self.kind = kind
self.project = project
self.namespace = namespace
self.ancestor = ancestor
self.filters = filters
self.projection = projection
self.order = order
self.distinct_on = distinct_on
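# Minimal fake datastore client exposing only the attributes the query and
# iterator code touches: project, datastore API stub, namespace, transaction.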
class _Client(object):
def __init__(self, project, datastore_api=None, namespace=None,
transaction=None):
self.project = project
self._datastore_api = datastore_api
self.namespace = namespace
self._transaction = transaction
@property
def current_transaction(self):
return self._transaction
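# Build a minimal entity protobuf whose key has a single path element.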
def _make_entity(kind, id_, project):
from google.cloud.proto.datastore.v1 import entity_pb2
key = entity_pb2.Key()
key.partition_id.project_id = project
elem = key.path.add()
elem.kind = kind
elem.id = id_
return entity_pb2.Entity(key=key)
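# Wrap the given entity protobufs, cursor, more-results enum and
# skipped-results count into a RunQueryResponse with a single batch.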
def _make_query_response(
entity_pbs, cursor_as_bytes, more_results_enum, skipped_results):
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import query_pb2
return datastore_pb2.RunQueryResponse(
batch=query_pb2.QueryResultBatch(
skipped_results=skipped_results,
end_cursor=cursor_as_bytes,
more_results=more_results_enum,
entity_results=[
query_pb2.EntityResult(entity=entity)
for entity in entity_pbs
],
),
)
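# Return a datastore API stub whose run_query mock yields the canned result.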
def _make_datastore_api(result=None):
run_query = mock.Mock(return_value=result, spec=[])
return mock.Mock(run_query=run_query, spec=['run_query'])
| tartavull/google-cloud-python | datastore/tests/unit/test_query.py | Python | apache-2.0 | 26,883 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._query import query | kubeflow/kfp-tekton-backend | components/gcp/container/component_sdk/python/kfp_component/google/bigquery/__init__.py | Python | apache-2.0 | 601 |
#!/usr/bin/python
#
# Script to determine the performance statistics and other information
# related to libvirt guests
# https://github.com/odyssey4me/monitoring-scripts
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import sys
import socket
import libvirt
import argparse
import traceback
import jsonpickle
import subprocess
from xml.etree import ElementTree
# Version required for nagios
VERSION = 'check_virsh_domains v1.0'
# Convert the Domain State integer into the description
# http://libvirt.org/html/libvirt-libvirt.html#virDomainState
DOMAIN_STATES = {
0: 'None',
1: 'Running',
2: 'Blocked on resource',
3: 'Paused by user',
4: 'Being shut down',
5: 'Shut off',
6: 'Crashed',
7: 'Suspended by guest power management'
}
# Location of Zabbix Agent Configuration file
# TODO: This really should either be found, or be an optional argument
ZABBIX_CONF = '/opt/zabbix/etc/zabbix_agentd.conf'
# Location of the zabbix_sender binary
# TODO: This really should either be found, or be an optional argument
ZABBIX_SENDER = '/opt/zabbix/bin/zabbix_sender'
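# Wrapper that snapshots a libvirt domain's state, CPU, memory, network and
# block-device statistics at construction time and exposes helpers that report
# health, inventory and performance data.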
class Domain(object):
def __init__(self, vir_dom):
try:
# Get the domain's network interface device list
if_devices = self.get_if_devices(vir_dom)
# Get the domain's block device list
blk_devices = self.get_blk_devices(vir_dom)
# Get the domain's information
dom_info = vir_dom.info()
# Get the domain's memory stats
mem_stats = vir_dom.memoryStats()
# Get the domain's UUID
self.uuid = vir_dom.UUIDString()
# Compile the network interface stats for each network interface device
for if_num, if_dev in enumerate(if_devices):
# Get the interface stats
if_stats = vir_dom.interfaceStats(if_dev)
# Set class attributes using the interface index number (not the name)
setattr(self, 'if_%s_rx_bytes' % if_num, int(if_stats[0]))
setattr(self, 'if_%s_rx_packets' % if_num, int(if_stats[1]))
setattr(self, 'if_%s_rx_errors' % if_num, int(if_stats[2]))
setattr(self, 'if_%s_rx_drop' % if_num, int(if_stats[3]))
setattr(self, 'if_%s_tx_bytes' % if_num, int(if_stats[4]))
setattr(self, 'if_%s_tx_packets' % if_num, int(if_stats[5]))
setattr(self, 'if_%s_tx_errors' % if_num, int(if_stats[6]))
setattr(self, 'if_%s_tx_drop' % if_num, int(if_stats[7]))
# Compile the block device stats for each block device
for blk_dev in blk_devices:
#Get the block device stats
blk_stats = vir_dom.blockStats(blk_dev)
# Set class attributes using the device name
setattr(self, 'blk_%s_rd_req' % blk_dev, int(blk_stats[0]))
setattr(self, 'blk_%s_rd_bytes' % blk_dev, int(blk_stats[1]))
setattr(self, 'blk_%s_wr_req' % blk_dev, int(blk_stats[2]))
setattr(self, 'blk_%s_wr_bytes' % blk_dev, int(blk_stats[3]))
# Get the memory stats in kB and covert to B for consistency
self.mem_max_bytes = int(dom_info[1]) * 1024
self.mem_used_bytes = int(dom_info[2]) * 1024
# Get the number of vCPU's and the usage time in nanoseconds
self.cpu_count = int(dom_info[3])
self.cpu_time = int(dom_info[4])
# Get the state of the domain
self.state = DOMAIN_STATES[dom_info[0]]
# Note:
# To calculate %CPU utilization you need to have a time period. We're expecting that the
# %CPU calculation is done externally by a system that knows the time period between measurements.
#
# For reference:
# http://people.redhat.com/~rjones/virt-top/faq.html#calccpu
# cpu_time_diff = cpuTime_now - cpuTime_t_seconds_ago
# %CPU = 100 * cpu_time_diff / (t * host_cpus * 10^9)
# There may not be anything in mem_stats (support is limited), but let's add any values there may be
for key, value in mem_stats.iteritems():
value_bytes = int(value) * 1024
setattr(self, 'mem_%s' % key, value_bytes)
except OSError:
print 'Failed to get domain information'
def get_if_devices(self, vir_dom):
#Function to return a list of network devices used
#Create a XML tree from the domain XML description
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/interface/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in devices:
devices.append(dev)
#Completed device name list
return devices
def get_blk_devices(self, vir_dom):
#Function to return a list of block devices used
#Create a XML tree from the domain XML description
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/disk/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in devices:
devices.append(dev)
#Completed device name list
return devices
def health(self):
output = {'errorlevel': 0, 'errors': []}
# Check whether there are network interface errors or drops
for key in vars(self):
if re.match('if_.*_errors', key):
if vars(self)[key] > 0:
output['errors'].append('Domain has network interface errors.')
output['errorlevel'] = set_errorlevel(output['errorlevel'], 1)
if re.match('if_.*_drop', key):
if vars(self)[key] > 0:
output['errors'].append('Domain has network interface drops.')
output['errorlevel'] = set_errorlevel(output['errorlevel'], 1)
# Check whether the domain is in a 'blocked' or 'crashed' state
if self.state == 'Blocked on resource' or self.state == 'Crashed':
output['errors'].append('Domain is %s!' % self.state)
output['errorlevel'] = set_errorlevel(output['errorlevel'], 2)
return output
def inventory(self):
output = {}
output['mem_max_bytes'] = '%i' % self.mem_max_bytes
output['cpu_count'] = '%i' % self.cpu_count
output['state'] = '%s' % self.state
output['uuid'] = '%s' % self.uuid
return output
def perfdata(self):
output = {}
# Loop through all attributes and add the if and blk data
for key in vars(self):
if re.match('if_.*', key) or re.match('blk_.*', key):
output[key] = vars(self)[key]
output['mem_used_bytes'] = self.mem_used_bytes
output['cpu_time'] = self.cpu_time
return output
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--discovery', action='store_true', help='Only output discovery data')
ap.add_argument('-i', '--inventory', action='store_true', help='Include inventory data in output')
ap.add_argument('-o', '--output', default='stdout', choices=['stdout', 'nagios', 'zabbix'], help='Output format')
ap.add_argument('-p', '--perfdata', action='store_true', help='Include performance data in output')
ap.add_argument('-v', '--verbose', default=0, action='count', help='Verbose output')
ap.add_argument('-V', '--version', action='store_true', help='Show script version')
return ap.parse_args()
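# Combine Nagios-style error levels: 3 (unknown) always takes precedence when
# requested, otherwise the higher of the two levels is kept.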
def set_errorlevel(current, target):
if current < target != 3:
return target
elif target == 3:
return 3
else:
return current
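# Render an item's error level as an OK/WARNING/CRITICAL/UNKNOWN status line.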
def output_status(item_name, check_type, errorlevel):
if errorlevel == 0:
return '%s %s OK' % (item_name, check_type)
elif errorlevel == 1:
return '%s %s WARNING' % (item_name, check_type)
elif errorlevel == 2:
return '%s %s CRITICAL' % (item_name, check_type)
else:
return '%s %s UNKNOWN' % (item_name, check_type)
def output_stdout(args):
domains = domain_list()
errorlevels = []
for domain in domains:
print output_status('Domain %s' % domain.uuid, 'Health', domain.health()['errorlevel'])
errorlevels.append(domain.health()['errorlevel'])
if args.verbose > 0:
for error in domain.health()['errors']:
print ' - %s' % error
if args.perfdata:
for key, value in domain.perfdata().iteritems():
print ' - %s = %s' % (key, value)
if args.inventory:
for key, value in domain.inventory().iteritems():
print ' - %s = %s' % (key, value)
# filter out 'unknown' errorlevels if there are any 'warning' or 'critical' errorlevels
if (1 in errorlevels or 2 in errorlevels) and max(errorlevels) == 3:
errorlevels = filter(lambda item: item != 3, errorlevels)
sys.exit(max(errorlevels))
def output_nagios(args):
domains = domain_list()
output_line = ''
output_perfdata = ' |'
errorlevels = []
for domain in domains:
if output_line != '':
output_line += '; '
output_line += output_status('Dom %s' % domain.uuid, 'Health',
domain.health()['errorlevel'])
errorlevels.append(domain.health()['errorlevel'])
if args.verbose > 0:
for error in domain.health()['errors']:
output_line += ' %s' % error
if args.perfdata:
for key, value in domain.perfdata().iteritems():
output_perfdata += " %s='%s'" % (key, value)
if args.perfdata:
output_line += output_perfdata
print output_line
# filter out 'unknown' errorlevels if there are any 'warning' or 'critical' errorlevels
if (1 in errorlevels or 2 in errorlevels) and max(errorlevels) == 3:
errorlevels = filter(lambda item: item != 3, errorlevels)
sys.exit(max(errorlevels))
def output_zabbix(args):
domains = domain_list()
output_line = ''
errorlevels = []
for domain in domains:
output_line += '%s virsh.domain[%s,health] %s\n' % (socket.gethostname(), domain.uuid, output_status(domain.uuid,'Health', domain.health()['errorlevel']))
errorlevels.append(domain.health()['errorlevel'])
if args.verbose > 0 and len(domain.health()['errors']) > 0:
output_line += '%s virsh.domain[%s,errors] %s\n' % (socket.gethostname(), domain.uuid, ";".join(domain.health()['errors']))
elif args.verbose > 0 and len(domain.health()['errors']) == 0:
output_line += '%s virsh.domain[%s,errors] None\n' % (socket.gethostname(), domain.uuid)
if args.perfdata:
for key, value in domain.perfdata().iteritems():
output_line += '%s virsh.domain[%s,%s] %s\n' % (socket.gethostname(), domain.uuid, key, value)
if args.inventory:
for key, value in domain.inventory().iteritems():
output_line += '%s virsh.domain[%s,%s] %s\n' % (socket.gethostname(), domain.uuid, key, value)
# filter out 'unknown' errorlevels if there are any 'warning' or 'critical' errorlevels
if (1 in errorlevels or 2 in errorlevels) and max(errorlevels) == 3:
errorlevels = filter(lambda item: item != 3, errorlevels)
#TODO: This should really have exception handling
cmd = '%s -c %s -v -i -' % (ZABBIX_SENDER, ZABBIX_CONF)
cmd = cmd.split(' ')
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.stdin.write(output_line)
status = p.poll()
stdout, stderr = p.communicate()
if not status:
print 'zabbix_sender output: %s' % stdout
else:
print 'zabbix_sender error: %s' % stdout
print output_status('Overall','Health', max(errorlevels))
sys.exit(max(errorlevels))
def output_zabbix_discovery(args):
#TODO: Sort this mess out.
#Using the objects was too slow - the discovery would keep failing when requested by the Zabbix Server
try:
# Connect to the local hypervisor (read only)
conn = libvirt.openReadOnly(None)
# Prepare the lists and dict objects
dom_list = []
return_dict = {}
# Loop through the running domains and retrieve the appropriate discovery information
for dom_id in conn.listDomainsID():
dom_dict = {}
vir_dom = conn.lookupByID(dom_id)
dom_dict['{#VIRSH_DOMAIN_UUID}'] = vir_dom.UUIDString()
if args.perfdata:
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
if_devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/interface/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in if_devices:
if_devices.append(dev)
#Put the final device list into the domain's return dict
for if_num, if_dev in enumerate(if_devices):
dom_dict['{#VIRSH_DOMAIN_NIC}'] = str(if_num)
#The list of device names
blk_devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/disk/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in blk_devices:
blk_devices.append(dev)
#Put the final device list into the domain's return dict
for blk_dev in blk_devices:
dom_dict['{#VIRSH_DOMAIN_DISK}'] = blk_dev
dom_list.append(dom_dict)
# Loop through the offline domains and retrieve the appropriate discovery information
for name in conn.listDefinedDomains():
dom_dict = {}
            vir_dom = conn.lookupByName(name)
dom_dict['{#VIRSH_DOMAIN_UUID}'] = vir_dom.UUIDString()
if args.perfdata:
dom_tree = ElementTree.fromstring(vir_dom.XMLDesc(0))
#The list of device names
if_devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/interface/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in if_devices:
if_devices.append(dev)
#Put the final device list into the domain's return dict
for if_num, if_dev in enumerate(if_devices):
dom_dict['{#VIRSH_DOMAIN_NIC}'] = str(if_num)
#The list of device names
blk_devices = []
#Iterate through all network interface target elements of the domain
for target in dom_tree.findall("devices/disk/target"):
#Get the device name
dev = target.get("dev")
#If this device is already in the list, don't add it again
if not dev in blk_devices:
blk_devices.append(dev)
#Put the final device list into the domain's return dict
for blk_dev in blk_devices:
dom_dict['{#VIRSH_DOMAIN_DISK}'] = blk_dev
dom_list.append(dom_dict)
return_dict['data'] = dom_list
# return the data encoded as json
print jsonpickle.encode(return_dict)
except OSError:
print 'Failed to get domain list'
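# Return Domain wrappers for every running and defined (offline) libvirt guest
# on the local hypervisor.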
def domain_list():
try:
# Connect to the local hypervisor (read only)
conn = libvirt.openReadOnly(None)
# Prepare the list of domains to return
dom_list = []
# Loop through the running domains, create and store the objects
for id in conn.listDomainsID():
vir_dom = conn.lookupByID(id)
dom_obj = Domain(vir_dom)
dom_list.append(dom_obj)
# Loop through the offline domains, create and store the objects
for name in conn.listDefinedDomains():
vir_dom = conn.lookupByName(name)
dom_obj = Domain(vir_dom)
dom_list.append(dom_obj)
return dom_list
except OSError:
print 'Failed to get domain list'
return []
if __name__ == '__main__':
args = parse_args()
try:
if args.version:
print VERSION
elif args.output == 'stdout':
output_stdout(args)
elif args.output == 'nagios':
output_nagios(args)
elif args.output == 'zabbix' and not args.discovery:
output_zabbix(args)
elif args.output == 'zabbix' and args.discovery:
output_zabbix_discovery(args)
sys.exit(0)
except Exception, err:
#print("ERROR: %s" % err)
ex, val, tb = sys.exc_info()
traceback.print_exception(ex, val, tb)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1) | odyssey4me/monitoring-scripts | check_virsh_domains.py | Python | apache-2.0 | 18,638 |
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystonemiddleware.audit as audit_middleware
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_token
from ironic.common import exception
from ironic.conf import CONF
class IronicCORS(cors_middleware.CORS):
"""Ironic-specific CORS class
We're adding the Ironic-specific version headers to the list of simple
headers in order that a request bearing those headers might be accepted by
the Ironic REST API.
"""
simple_headers = cors_middleware.CORS.simple_headers + [
'X-Auth-Token',
base.Version.max_string,
base.Version.min_string,
base.Version.string
]
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.ContextHook(pecan_config.app.acl_public_routes),
hooks.RPCHook(),
hooks.NoExceptionTracebackHook(),
hooks.PublicUrlHook()]
if extra_hooks:
app_hooks.extend(extra_hooks)
if not pecan_config:
pecan_config = get_pecan_config()
pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.pecan_debug,
static_root=pecan_config.app.static_root if CONF.pecan_debug else None,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if CONF.audit.enabled:
try:
app = audit_middleware.AuditMiddleware(
app,
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list
)
except (EnvironmentError, OSError,
audit_middleware.PycadfAuditApiConfigError) as e:
raise exception.InputFileError(
file_name=CONF.audit.audit_map_file,
reason=e
)
if CONF.auth_strategy == "keystone":
app = auth_token.AuthTokenMiddleware(
app, dict(cfg.CONF),
public_api_routes=pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = IronicCORS(app, CONF)
cors_middleware.set_defaults(
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string]
)
return app
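# WSGI application entry point; currently every request is routed to the
# single v1 Pecan app built by setup_app().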
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
| SauloAislan/ironic | ironic/api/app.py | Python | apache-2.0 | 3,844 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_policy import opts
from oslo_service import wsgi
from manila.common import config
CONF = config.CONF
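# Override configuration defaults so the unit tests can run against an
# in-memory SQLite database and fake share drivers without a real backend.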
def set_defaults(conf):
_safe_set_of_opts(conf, 'verbose', True)
_safe_set_of_opts(conf, 'state_path', os.path.abspath(
os.path.join(os.path.dirname(__file__),
'..',
'..')))
_safe_set_of_opts(conf, 'connection', "sqlite://", group='database')
_safe_set_of_opts(conf, 'sqlite_synchronous', False)
_POLICY_PATH = os.path.abspath(os.path.join(CONF.state_path,
'manila/tests/policy.json'))
opts.set_defaults(conf, policy_file=_POLICY_PATH)
_safe_set_of_opts(conf, 'share_export_ip', '0.0.0.0')
_safe_set_of_opts(conf, 'service_instance_user', 'fake_user')
_API_PASTE_PATH = os.path.abspath(os.path.join(CONF.state_path,
'etc/manila/api-paste.ini'))
wsgi.register_opts(conf)
_safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH)
_safe_set_of_opts(conf, 'share_driver',
'manila.tests.fake_driver.FakeShareDriver')
_safe_set_of_opts(conf, 'auth_strategy', 'noauth')
_safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1')
_safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2')
_safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar'])
_safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper')
_safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_')
_safe_set_of_opts(conf, 'hitachi_hsp_host', '172.24.47.190')
_safe_set_of_opts(conf, 'hitachi_hsp_username', 'hsp_user')
_safe_set_of_opts(conf, 'hitachi_hsp_password', 'hsp_password')
_safe_set_of_opts(conf, 'qnap_management_url', 'http://1.2.3.4:8080')
_safe_set_of_opts(conf, 'qnap_share_ip', '1.2.3.4')
_safe_set_of_opts(conf, 'qnap_nas_login', 'admin')
_safe_set_of_opts(conf, 'qnap_nas_password', 'qnapadmin')
_safe_set_of_opts(conf, 'qnap_poolname', 'Storage Pool 1')
def _safe_set_of_opts(conf, *args, **kwargs):
try:
conf.set_default(*args, **kwargs)
except config.cfg.NoSuchOptError:
# Assumed that opt is not imported and not used
pass
| bswartz/manila | manila/tests/conf_fixture.py | Python | apache-2.0 | 2,989 |
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import os
import mock
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume.drivers import smbfs
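# Unit tests for the SMBFS volume driver; share mounts, image conversion and
# capacity queries are exercised through mocks rather than a real SMB share.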
class SmbFsTestCase(test.TestCase):
_FAKE_SHARE = '//1.2.3.4/share1'
_FAKE_MNT_BASE = '/mnt'
_FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
_FAKE_TOTAL_SIZE = '2048'
_FAKE_TOTAL_AVAILABLE = '1024'
_FAKE_TOTAL_ALLOCATED = 1024
_FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
'size': 1,
'provider_location': _FAKE_SHARE,
'name': _FAKE_VOLUME_NAME,
'status': 'available'}
_FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash')
_FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
_FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
_FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
'volume': _FAKE_VOLUME,
'status': 'available',
'volume_size': 1}
_FAKE_SNAPSHOT_PATH = (
_FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
_FAKE_OPTIONS_DICT = {'username': 'Administrator',
'password': '12345'}
_FAKE_LISTDIR = [_FAKE_VOLUME_NAME, _FAKE_VOLUME_NAME + '.vhd',
_FAKE_VOLUME_NAME + '.vhdx', 'fake_folder']
_FAKE_SMBFS_CONFIG = mock.MagicMock()
_FAKE_SMBFS_CONFIG.smbfs_oversub_ratio = 2
_FAKE_SMBFS_CONFIG.smbfs_used_ratio = 0.5
_FAKE_SMBFS_CONFIG.smbfs_shares_config = '/fake/config/path'
_FAKE_SMBFS_CONFIG.smbfs_default_volume_format = 'raw'
_FAKE_SMBFS_CONFIG.smbfs_sparsed_volumes = False
def setUp(self):
super(SmbFsTestCase, self).setUp()
smbfs.SmbfsDriver.__init__ = lambda x: None
self._smbfs_driver = smbfs.SmbfsDriver()
self._smbfs_driver._remotefsclient = mock.Mock()
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock()
self._smbfs_driver.base = self._FAKE_MNT_BASE
def test_delete_volume(self):
drv = self._smbfs_driver
fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
drv._ensure_share_mounted = mock.MagicMock()
fake_ensure_mounted = drv._ensure_share_mounted
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._delete = mock.Mock()
drv._local_path_volume_info = mock.Mock(
return_value=fake_vol_info)
with mock.patch('os.path.exists', lambda x: True):
drv.delete_volume(self._FAKE_VOLUME)
fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
drv._delete.assert_any_call(
self._FAKE_VOLUME_PATH)
drv._delete.assert_any_call(fake_vol_info)
def _test_setup(self, config, share_config_exists=True):
fake_exists = mock.Mock(return_value=share_config_exists)
fake_ensure_mounted = mock.MagicMock()
self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
self._smbfs_driver.configuration = config
with mock.patch('os.path.exists', fake_exists):
if not (config.smbfs_shares_config and share_config_exists and
config.smbfs_oversub_ratio > 0 and
0 <= config.smbfs_used_ratio <= 1):
self.assertRaises(exception.SmbfsException,
self._smbfs_driver.do_setup,
None)
else:
self._smbfs_driver.do_setup(None)
self.assertEqual(self._smbfs_driver.shares, {})
fake_ensure_mounted.assert_called_once()
def test_setup_missing_shares_config_option(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_shares_config = None
self._test_setup(fake_config, None)
def test_setup_missing_shares_config_file(self):
self._test_setup(self._FAKE_SMBFS_CONFIG, False)
    def test_setup_invalid_oversub_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_oversub_ratio = -1
self._test_setup(fake_config)
def test_setup_invalid_used_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_used_ratio = -1
self._test_setup(fake_config)
def _test_create_volume(self, volume_exists=False, volume_format=None):
fake_method = mock.MagicMock()
self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
self._smbfs_driver.get_volume_format = mock.MagicMock()
windows_image_format = False
fake_vol_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_volume_format.return_value = volume_format
if volume_format:
if volume_format in ('vhd', 'vhdx'):
windows_image_format = volume_format
if volume_format == 'vhd':
windows_image_format = 'vpc'
method = '_create_windows_image'
fake_vol_path += '.' + volume_format
else:
method = '_create_%s_file' % volume_format
if volume_format == 'sparsed':
self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
True)
else:
method = '_create_regular_file'
setattr(self._smbfs_driver, method, fake_method)
with mock.patch('os.path.exists', new=lambda x: volume_exists):
if volume_exists:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._do_create_volume,
self._FAKE_VOLUME)
return
self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
if windows_image_format:
fake_method.assert_called_once_with(
fake_vol_path,
self._FAKE_VOLUME['size'],
windows_image_format)
else:
fake_method.assert_called_once_with(
fake_vol_path, self._FAKE_VOLUME['size'])
fake_set_permissions.assert_called_once_with(fake_vol_path)
def test_create_existing_volume(self):
self._test_create_volume(volume_exists=True)
def test_create_vhdx(self):
self._test_create_volume(volume_format='vhdx')
def test_create_qcow2(self):
self._test_create_volume(volume_format='qcow2')
def test_create_sparsed(self):
self._test_create_volume(volume_format='sparsed')
def test_create_regular(self):
self._test_create_volume()
def _test_find_share(self, existing_mounted_shares=True,
eligible_shares=True):
if existing_mounted_shares:
mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
else:
mounted_shares = None
self._smbfs_driver._mounted_shares = mounted_shares
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=eligible_shares)
fake_capacity_info = ((2, 1, 5), (2, 1, 4), (2, 1, 1))
self._smbfs_driver._get_capacity_info = mock.Mock(
side_effect=fake_capacity_info)
if not mounted_shares:
self.assertRaises(exception.SmbfsNoSharesMounted,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
elif not eligible_shares:
self.assertRaises(exception.SmbfsNoSuitableShareFound,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
else:
ret_value = self._smbfs_driver._find_share(
self._FAKE_VOLUME['size'])
# The eligible share with the minimum allocated space
# will be selected
self.assertEqual(ret_value, 'fake_share3')
def test_find_share(self):
self._test_find_share()
def test_find_share_missing_mounted_shares(self):
self._test_find_share(existing_mounted_shares=False)
def test_find_share_missing_eligible_shares(self):
self._test_find_share(eligible_shares=False)
def _test_is_share_eligible(self, capacity_info, volume_size):
self._smbfs_driver._get_capacity_info = mock.Mock(
return_value=[float(x << 30) for x in capacity_info])
self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
volume_size)
def test_share_volume_above_used_ratio(self):
fake_capacity_info = (4, 1, 1)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_eligible_share(self):
fake_capacity_info = (4, 4, 0)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, True)
def test_share_volume_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 7)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_share_reserved_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 10)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_parse_options(self):
(opt_list,
opt_dict) = self._smbfs_driver.parse_options(
self._FAKE_SHARE_OPTS)
expected_ret = ([], self._FAKE_OPTIONS_DICT)
self.assertEqual(expected_ret, (opt_list, opt_dict))
def test_parse_credentials(self):
fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
expected_flags = '-o username=Administrator,noperm'
flags = self._smbfs_driver.parse_credentials(fake_smb_options)
self.assertEqual(expected_flags, flags)
def test_get_volume_path(self):
self._smbfs_driver.get_volume_format = mock.Mock(
return_value='vhd')
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
expected = self._FAKE_VOLUME_PATH + '.vhd'
ret_val = self._smbfs_driver.local_path(self._FAKE_VOLUME)
self.assertEqual(expected, ret_val)
def test_initialize_connection(self):
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
self._smbfs_driver._get_mount_point_base = mock.Mock(
return_value=self._FAKE_MNT_BASE)
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format='raw'))
fake_data = {'export': self._FAKE_SHARE,
'format': 'raw',
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS}
expected = {
'driver_volume_type': 'smbfs',
'data': fake_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection(
self._FAKE_VOLUME, None)
self.assertEqual(expected, ret_val)
def _test_extend_volume(self, extend_failed=False, image_format='raw'):
drv = self._smbfs_driver
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv._check_extend_volume_support = mock.Mock(
return_value=True)
drv._is_file_size_equal = mock.Mock(
return_value=not extend_failed)
drv._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format=image_format))
with contextlib.nested(
mock.patch.object(image_utils, 'resize_image'),
mock.patch.object(image_utils, 'convert_image')) as (
fake_resize, fake_convert):
if extend_failed:
self.assertRaises(exception.ExtendVolumeError,
drv._extend_volume,
self._FAKE_VOLUME, mock.sentinel.new_size)
else:
drv._extend_volume(
self._FAKE_VOLUME,
mock.sentinel.new_size)
if image_format in (drv._DISK_FORMAT_VHDX,
drv._DISK_FORMAT_VHD_LEGACY):
fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
fake_tmp_path, 'raw')
fake_resize.assert_called_once_with(
fake_tmp_path, mock.sentinel.new_size)
fake_convert.assert_any_call(fake_tmp_path,
self._FAKE_VOLUME_PATH,
image_format)
else:
fake_resize.assert_called_once_with(
self._FAKE_VOLUME_PATH, mock.sentinel.new_size)
def test_extend_volume(self):
self._test_extend_volume()
def test_extend_volume_failed(self):
self._test_extend_volume(extend_failed=True)
def test_extend_vhd_volume(self):
self._test_extend_volume(image_format='vpc')
def _test_check_extend_support(self, has_snapshots=False,
is_eligible=True):
self._smbfs_driver.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
if has_snapshots:
active_file_path = self._FAKE_SNAPSHOT_PATH
else:
active_file_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=active_file_path)
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=is_eligible)
if has_snapshots:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
elif not is_eligible:
self.assertRaises(exception.ExtendVolumeError,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
else:
self._smbfs_driver._check_extend_volume_support(
self._FAKE_VOLUME, 2)
self._smbfs_driver._is_share_eligible.assert_called_once_with(
self._FAKE_SHARE, 1)
def test_check_extend_support(self):
self._test_check_extend_support()
def test_check_extend_volume_with_snapshots(self):
self._test_check_extend_support(has_snapshots=True)
def test_check_extend_volume_uneligible_share(self):
self._test_check_extend_support(is_eligible=False)
def test_create_volume_from_in_use_snapshot(self):
fake_snapshot = {'status': 'in-use'}
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self._FAKE_VOLUME, fake_snapshot)
def test_copy_volume_from_snapshot(self):
drv = self._smbfs_driver
fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
fake_img_info = mock.MagicMock()
fake_img_info.backing_file = self._FAKE_VOLUME_NAME
drv.get_volume_format = mock.Mock(
return_value='raw')
drv._local_path_volume_info = mock.Mock(
return_value=self._FAKE_VOLUME_PATH + '.info')
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv._read_info_file = mock.Mock(
return_value=fake_volume_info)
drv._qemu_img_info = mock.Mock(
return_value=fake_img_info)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH[:-1])
drv._extend_volume = mock.Mock()
drv._set_rw_permissions_for_all = mock.Mock()
with mock.patch.object(image_utils, 'convert_image') as (
fake_convert_image):
drv._copy_volume_from_snapshot(
self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
self._FAKE_VOLUME['size'])
drv._extend_volume.assert_called_once_with(
self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
fake_convert_image.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
def test_ensure_mounted(self):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
def _test_copy_image_to_volume(self, unsupported_qemu_version=False,
wrong_size_after_fetch=False):
drv = self._smbfs_driver
vol_size_bytes = self._FAKE_VOLUME['size'] << 30
fake_image_service = mock.MagicMock()
fake_image_service.show.return_value = (
{'id': 'fake_image_id', 'disk_format': 'raw'})
fake_img_info = mock.MagicMock()
if wrong_size_after_fetch:
fake_img_info.virtual_size = 2 * vol_size_bytes
else:
fake_img_info.virtual_size = vol_size_bytes
if unsupported_qemu_version:
qemu_version = [1, 5]
else:
qemu_version = [1, 7]
drv.get_volume_format = mock.Mock(
return_value=drv._DISK_FORMAT_VHDX)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv.get_qemu_version = mock.Mock(
return_value=qemu_version)
drv._do_extend_volume = mock.Mock()
drv.configuration = mock.MagicMock()
drv.configuration.volume_dd_blocksize = (
mock.sentinel.block_size)
exc = None
with contextlib.nested(
mock.patch.object(image_utils,
'fetch_to_volume_format'),
mock.patch.object(image_utils,
'qemu_img_info')) as (
fake_fetch,
fake_qemu_img_info):
if wrong_size_after_fetch:
exc = exception.ImageUnacceptable
elif unsupported_qemu_version:
exc = exception.InvalidVolume
fake_qemu_img_info.return_value = fake_img_info
if exc:
self.assertRaises(
exc, drv.copy_image_to_volume,
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
else:
drv.copy_image_to_volume(
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
fake_fetch.assert_called_once_with(
mock.sentinel.context, fake_image_service,
mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
drv._DISK_FORMAT_VHDX,
mock.sentinel.block_size)
drv._do_extend_volume.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME['size'])
def test_copy_image_to_volume(self):
self._test_copy_image_to_volume()
def test_copy_image_to_volume_wrong_size_after_fetch(self):
self._test_copy_image_to_volume(wrong_size_after_fetch=True)
def test_copy_image_to_volume_unsupported_qemu_version(self):
self._test_copy_image_to_volume(unsupported_qemu_version=True)
def test_get_capacity_info(self):
fake_block_size = 4096.0
fake_total_blocks = 1024
fake_avail_blocks = 512
fake_total_allocated = fake_total_blocks * fake_block_size
fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
fake_avail_blocks), None)
fake_du = (str(fake_total_allocated), None)
self._smbfs_driver._get_mount_point_for_share = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock(
side_effect=(fake_df, fake_du))
ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
expected = (fake_block_size * fake_total_blocks,
fake_block_size * fake_avail_blocks,
fake_total_allocated)
self.assertEqual(expected, ret_val)
| e0ne/cinder | cinder/tests/test_smbfs.py | Python | apache-2.0 | 22,226 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink._keyset_reader."""
from typing import cast
from absl.testing import absltest
from tink.proto import tink_pb2
import tink
from tink import core
class JsonKeysetReaderTest(absltest.TestCase):
def test_read(self):
json_keyset = """
{
"primaryKeyId": 42,
"key": [
{
"keyData": {
"typeUrl": "type.googleapis.com/google.crypto.tink.AesGcmKey",
"keyMaterialType": "SYMMETRIC",
"value": "GhCS/1+ejWpx68NfGt6ziYHd"
},
"outputPrefixType": "TINK",
"keyId": 42,
"status": "ENABLED"
}
]
}"""
reader = tink.JsonKeysetReader(json_keyset)
keyset = reader.read()
self.assertEqual(keyset.primary_key_id, 42)
self.assertLen(keyset.key, 1)
def test_read_invalid(self):
reader = tink.JsonKeysetReader('not json')
with self.assertRaises(core.TinkError):
reader.read()
def test_read_encrypted(self):
# encryptedKeyset is a base64-encoding of 'some ciphertext with keyset'
json_encrypted_keyset = """
{
"encryptedKeyset": "c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0",
"keysetInfo": {
"primaryKeyId": 42,
"keyInfo": [
{
"typeUrl": "type.googleapis.com/google.crypto.tink.AesGcmKey",
"outputPrefixType": "TINK",
"keyId": 42,
"status": "ENABLED"
}
]
}
}"""
reader = tink.JsonKeysetReader(json_encrypted_keyset)
enc_keyset = reader.read_encrypted()
self.assertEqual(enc_keyset.encrypted_keyset,
b'some ciphertext with keyset')
self.assertLen(enc_keyset.keyset_info.key_info, 1)
self.assertEqual(enc_keyset.keyset_info.key_info[0].type_url,
'type.googleapis.com/google.crypto.tink.AesGcmKey')
def test_read_encrypted_invalid(self):
reader = tink.JsonKeysetReader('not json')
with self.assertRaises(core.TinkError):
reader.read_encrypted()
class BinaryKeysetReaderTest(absltest.TestCase):
def test_read(self):
keyset = tink_pb2.Keyset()
keyset.primary_key_id = 42
key = keyset.key.add()
key.key_data.type_url = 'type.googleapis.com/google.crypto.tink.AesGcmKey'
key.key_data.key_material_type = tink_pb2.KeyData.SYMMETRIC
key.key_data.value = b'GhCS/1+ejWpx68NfGt6ziYHd'
key.output_prefix_type = tink_pb2.TINK
key.key_id = 42
key.status = tink_pb2.ENABLED
reader = tink.BinaryKeysetReader(keyset.SerializeToString())
self.assertEqual(keyset, reader.read())
def test_read_none(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(cast(bytes, None))
reader.read()
def test_read_empty(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'')
reader.read()
def test_read_invalid(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'some weird data')
reader.read()
def test_read_encrypted(self):
encrypted_keyset = tink_pb2.EncryptedKeyset()
encrypted_keyset.encrypted_keyset = b'c29tZSBjaXBoZXJ0ZXh0IHdpdGgga2V5c2V0'
encrypted_keyset.keyset_info.primary_key_id = 42
key_info = encrypted_keyset.keyset_info.key_info.add()
key_info.type_url = 'type.googleapis.com/google.crypto.tink.AesGcmKey'
key_info.output_prefix_type = tink_pb2.TINK
key_info.key_id = 42
key_info.status = tink_pb2.ENABLED
reader = tink.BinaryKeysetReader(
encrypted_keyset.SerializeToString())
self.assertEqual(encrypted_keyset, reader.read_encrypted())
def test_read_encrypted_none(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(cast(bytes, None))
reader.read_encrypted()
def test_read_encrypted_empty(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'')
reader.read_encrypted()
def test_read_encrypted_invalid(self):
with self.assertRaises(core.TinkError):
reader = tink.BinaryKeysetReader(b'some weird data')
reader.read_encrypted()
if __name__ == '__main__':
absltest.main()
| google/tink | python/tink/_keyset_reader_test.py | Python | apache-2.0 | 4,878 |
# TEAM 2 (EQUIPE 2)
#Nahan Trindade Passos - 1615310021
#Ana Beatriz Frota - 1615310027
#
#
#
#
#
#
import math
print("Enter the coefficients of the equation ax2+bx+c")
a = float(input("Enter the value of A:\n"))
if(a==0):
    print("This is not a quadratic equation")
else:
    b = float(input("Value of B:\n"))
    c = float(input("Value of C:\n"))
    delta = (math.pow(b,2) - (4*a*c))
    if(delta<0):
        print("The equation has no real roots")
    elif(delta == 0):
        raiz = ((-1)*b + math.sqrt(delta))/(2*a)
        print("The equation has only one root",raiz)
    else:
        raiz1 = ((-1)*b + math.sqrt(delta))/(2*a)
        raiz2 = ((-1)*b - math.sqrt(delta))/(2*a)
        print("The equation has two roots")
        print("First root:",raiz1)
        print("Second root:",raiz2)
| any1m1c/ipc20161 | lista2/ipc_lista2.16.py | Python | apache-2.0 | 824 |
#!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class FullInspiration(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'id': 'str',
'location': 'FullLocation',
'type': 'str',
'title': 'str',
'introduction': 'str',
'content': 'str',
'photos': 'list[Photo]',
'videos': 'list[Video]'
}
self.attribute_map = {
'id': 'id',
'location': 'location',
'type': 'type',
'title': 'title',
'introduction': 'introduction',
'content': 'content',
'photos': 'photos',
'videos': 'videos'
}
# Inspiration Identifier
self.id = None # str
# Location
self.location = None # FullLocation
# Inspiration type
self.type = None # str
# Title
self.title = None # str
# Introduction
self.introduction = None # str
# Content
self.content = None # str
# Array of Photos
self.photos = None # list[Photo]
# Array of Videos
self.videos = None # list[Video]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| SYSTRAN/geographic-api-python-client | systran_geographic_api/models/full_inspiration.py | Python | apache-2.0 | 2,585 |
# -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/load_GPS_EXIF.py #
##############################################################################################
def load_GPS_EXIF(fname, python = True):
# Load sub-functions ...
from .load_GPS_EXIF1 import load_GPS_EXIF1
from .load_GPS_EXIF2 import load_GPS_EXIF2
# Check what the user wants ...
if python:
# Will use the Python module "exifread" ...
return load_GPS_EXIF1(fname)
else:
# Will use the binary "exiftool" ...
return load_GPS_EXIF2(fname)
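# Minimal usage sketch (hypothetical file name, not part of the original module):
#   tags = load_GPS_EXIF("photo.jpg")                # parse GPS EXIF via the "exifread" module
#   tags = load_GPS_EXIF("photo.jpg", python=False)  # shell out to the "exiftool" binary instead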
| Guymer/PyGuymer | load_GPS_EXIF.py | Python | apache-2.0 | 993 |
'''
Copyright 2013 George Caley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import codecs
import os
import re
import sqlite3
# woo regular expressions
PREREQS_RE = re.compile(r"Pre-?req(?:uisites?)?:(.*?)(?:</p>|;)")
EXCLUSIONS_RE = re.compile(r"((?:Excluded|Exclusion|Exclusions|(?:and )?Excludes)[: ](.*?))(?:</p>|<br />)", re.IGNORECASE)
COREQS_RE = re.compile(r"Co-?requisite:(.*?)</p>", re.IGNORECASE)
NAME_RE = re.compile(r"<title>UNSW Handbook Course - (.*?) - [A-Z]{4}[0-9]{4}</title>", re.DOTALL)
DESC_RE = re.compile(r"<!-- Start Course Description -->(.*?)<!-- End Course description -->", re.DOTALL | re.IGNORECASE)
GENED_RE = re.compile(r"Available for General Education:")
OUTLINE_RE = re.compile(r"Course Outline:.*?<a .*?href=[\"'](.*?)[\"']")
UOC_RE = re.compile(r"Units of Credit:.*?([0-9]+)")
COURSE_RE = re.compile(r"[A-Z]{4}[0-9]{4}", re.IGNORECASE)
BR_RE = re.compile(r"<br ?/?>", re.IGNORECASE)
TAG_RE = re.compile(r"</?.*?>")
TYPE_PREREQUISITE = "prerequisite"
TYPE_COREQUISITE = "corequisite"
TYPE_EXCLUSION = "exclusion"
DATABASE_FILENAME = "courses.db"
COURSE_DIR = "courses"
if os.path.exists(DATABASE_FILENAME):
print "Deleting existing database"
os.unlink(DATABASE_FILENAME)
print "Creating new database"
conn = sqlite3.connect(DATABASE_FILENAME)
cur = conn.cursor()
print "Creating tables"
cur.execute("CREATE TABLE courses (code text primary key, name text, description text, prerequisites text, corequisites text, exclusions text, gened integer, outline text, uoc integer)")
cur.execute("CREATE TABLE relationships (source text, destination text, type text)")
print "Loading course list"
print
filenames = os.listdir(COURSE_DIR)
i = 0
for filename in filenames:
i += 1
    # str.rstrip() strips a character set rather than a suffix, so slice ".html" off instead
    code = filename[:-len(".html")] if filename.endswith(".html") else filename
print "Reading %s (%d/%d)" % (code, i, len(filenames))
# open with unicode support
f = codecs.open("%s/%s" % (COURSE_DIR, filename), encoding="utf-8", mode="r")
data = f.read()
f.close()
# strip 's and <strong> tags
data = data.replace(" ", " ")
data = data.replace("<strong>", "")
data = data.replace("</strong>", "")
# find name
match = re.search(NAME_RE, data)
if match:
name = match.group(1).strip().replace("\n", "")
print "Found name:", name
else:
name = None
print "Couldn't find name"
print "Fatal error!"
quit()
# find exclusions. all of them.
exclusions = ""
exclusions_list = []
while True:
match = re.search(EXCLUSIONS_RE, data)
if match:
exclusions = match.group(2).strip()
print "Found exclusions:", exclusions
data = data.replace(match.group(1), "")
exclusions_list = re.findall(COURSE_RE, exclusions)
print "Exclusions list:", exclusions_list
else:
#exclusions = None
#exclusions_list = []
#print "Couldn't find exclusions"
break
# find corequisites
match = re.search(COREQS_RE, data)
if match:
coreqs = match.group(1).strip()
print "Found corequisites:", coreqs
data = data.replace(match.group(0), "")
coreqs_list = map(unicode.upper, re.findall(COURSE_RE, coreqs))
print "Corequisites list:", coreqs_list
else:
coreqs = None
coreqs_list = []
print "Couldn't find corequisites"
# find prerequisites
match = re.search(PREREQS_RE, data)
if match:
prereqs = match.group(1).strip()
print "Found prerequisites:", prereqs
data = data.replace(match.group(0), "")
prereqs_list = map(unicode.upper, re.findall(COURSE_RE, prereqs))
print "Prerequisites list:", prereqs_list
else:
prereqs = None
prereqs_list = []
print "Couldn't find prerequisites"
# find description
match = re.search(DESC_RE, data)
if match:
desc = match.group(1).strip()
# change <br>'s
#desc = re.sub(BR_RE, "\n", desc)
# strip tags
#desc = re.sub(TAG_RE, "", desc)
#print "Found description:", desc
print "Found description"
else:
desc = None
print "Couldn't find description"
# find general education statement
match = re.search(GENED_RE, data)
if match:
gened = 1
else:
gened = 0
# find course outline
match = re.search(OUTLINE_RE, data)
if match:
outline = match.group(1).strip()
print "Found course outline:", outline
else:
outline = None
print "Couldn't find course outline"
# find uoc
match = re.search(UOC_RE, data)
if match:
uoc = match.group(1).strip()
try:
uoc = int(uoc)
print "Found UoC:", uoc
except:
print "UoC was not an integer: '%s'" % uoc
uoc = None
else:
uoc = None
print "Couldn't find UoC"
print "Writing to database"
cur.execute("INSERT INTO courses (code, name, description, prerequisites, corequisites, exclusions, gened, outline, uoc) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", (code, name, desc, prereqs, coreqs, exclusions, gened, outline, uoc))
for prereq in prereqs_list:
cur.execute("INSERT INTO relationships (source, destination, type) VALUES (?, ?, ?)", (code, prereq, TYPE_PREREQUISITE))
for coreq in coreqs_list:
cur.execute("INSERT INTO relationships (source, destination, type) VALUES (?, ?, ?)", (code, coreq, TYPE_COREQUISITE))
for exclusion in exclusions_list:
cur.execute("INSERT INTO relationships (source, destination, type) VALUES (?, ?, ?)", (code, exclusion, TYPE_EXCLUSION))
print
conn.commit()
conn.close()
| spake/pathways | binder.py | Python | apache-2.0 | 6,253 |
import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
CLUSTER_NAME_2 = os.environ.get("RANCHER_CLUSTER_NAME_2", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
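# Poll the resource with client.reload() until check_function(resource) is truthy,
# raising (with fail_handler's extra detail, if given) once `timeout` seconds elapse.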
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
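# Wait for the workload to become active, then verify the expected pod count and pod
# state both through the Rancher API and through kubectl for the workload's type/labels.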
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
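# Run a kubectl command against the kubeconfig written by create_kubeconfig();
# output is requested and parsed as JSON unless json_out=False.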
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for transitioning state " + state)
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pod to get to running")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
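# Curl the ingress path on every schedulable node (optionally with a Host header) and
# check that each backing pod's name shows up in the responses.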
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = node.externalIpAddress
cmd = curl_args + " http://" + host_ip + path
validate_http_response(cmd, target_name_list)
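# Wait for the ingress to publish a public endpoint whose hostname starts with the
# ingress name, then validate responses from the workload pods at that URL.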
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
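# Hit `cmd` repeatedly (curl locally, or wget from client_pod when provided) until every
# name in target_name_list has been seen in a response; fails if any target is never hit.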
def validate_http_response(cmd, target_name_list, client_pod=None):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
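# End-to-end cluster validation: wait through the expected provisioning states, write a
# kubeconfig, deploy a daemonset workload and, unless skipped, an ingress pointing at it.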
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for node state " + state)
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for node to be deleted")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for expected cluster node count")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
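# Build the node registration command from the cluster registration token, appending the
# requested roles and the node's public/internal IP addresses.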
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
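# Classify the cluster by its config: "Custom" for RKE clusters whose nodes have no node
# template, otherwise the matching provider config name, falling back to "Imported".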
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if (len(nodes) > 0):
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
ip_list.append(node.externalIpAddress)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
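# Ping pod2's pod IP from inside pod1 and assert 0% packet loss (or 100% loss when the
# connection is expected to be blocked, e.g. by a network policy).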
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
    # check the image on every expected pod
    for x in range(0, numofpods):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for expected pod count in workload")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
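# hostPort publishes the port only on nodes that actually run one of the workload's pods,
# so only those nodes' external IPs are curled here.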
def validate_hostPort(p_client, workload, source_port, cluster):
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
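# NodePort exposes the same port on every schedulable node, so each node's external IP is
# curled on the published port and the responses must come from the workload's pods.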
def validate_nodePort(p_client, workload, cluster):
get_endpoint_url_for_workload(p_client, workload, 60)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port = wl.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
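# Create a workload (optionally a daemonset) whose single container mounts the given PVC
# at mount_path/sub_path.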
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
app_data = client.list_app(name=app_id).data
start = time.time()
assert len(app_data) == 1, "Cannot find app"
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
app = client.list_app(name=app_id).data
assert len(app) == 1
application = app[0]
return application
def validate_response_app_endpoint(p_client, appId):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
try:
r = requests.head(url)
assert r.status_code == 200, \
"Http response is not 200. Failed to launch the app"
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
| sabiodelhielo/rancher-validation | tests/v3_api/common.py | Python | apache-2.0 | 41,077 |
__author__ = 'mpetyx'
from tastypie.authorization import DjangoAuthorization
from .models import OpeniQuestion
from OPENiapp.APIS.OpeniGenericResource import GenericResource
from OPENiapp.APIS.OPENiAuthorization import Authorization
from OPENiapp.APIS.OPENiAuthentication import Authentication
class QuestionResource(GenericResource):
class Meta:
queryset = OpeniQuestion.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
resource_name = 'question'
authentication = Authentication()
authorization = Authorization()
# filtering = {
# 'slug': ALL,
# 'user': ALL_WITH_RELATIONS,
# 'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
# }
extra_actions = [
{
"name": "comments",
"http_method": "GET",
"resource_type": "list",
"description": "comments from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
},
{
"name": "likes",
"http_method": "GET",
"resource_type": "list",
"description": "likes from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
},
{
"name": "dislikes",
"http_method": "GET",
"resource_type": "list",
"description": "dislikes from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
}
] | OPENi-ict/ntua_demo | openiPrototype/openiPrototype/APIS/Activity/Question/Resources.py | Python | apache-2.0 | 2,158 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Images and Snapshots.
"""
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import exceptions
from horizon import tables
from horizon import tabs
from .images.tables import ImagesTable
from .snapshots.tables import SnapshotsTable
from .volume_snapshots.tables import VolumeSnapshotsTable
from .volume_snapshots.tabs import SnapshotDetailTabs
LOG = logging.getLogger(__name__)
class IndexView(tables.MultiTableView):
table_classes = (ImagesTable, SnapshotsTable, VolumeSnapshotsTable)
template_name = 'project/images_and_snapshots/index.html'
def has_more_data(self, table):
return getattr(self, "_more_%s" % table.name, False)
def get_images_data(self):
marker = self.request.GET.get(ImagesTable._meta.pagination_param, None)
try:
# FIXME(gabriel): The paging is going to be strange here due to
# our filtering after the fact.
(all_images,
self._more_images) = api.image_list_detailed(self.request,
marker=marker)
images = [im for im in all_images
if im.container_format not in ['aki', 'ari'] and
im.properties.get("image_type", '') != "snapshot"]
except:
images = []
exceptions.handle(self.request, _("Unable to retrieve images."))
return images
def get_snapshots_data(self):
req = self.request
marker = req.GET.get(SnapshotsTable._meta.pagination_param, None)
try:
snaps, self._more_snapshots = api.snapshot_list_detailed(req,
marker=marker)
except:
snaps = []
exceptions.handle(req, _("Unable to retrieve snapshots."))
return snaps
def get_volume_snapshots_data(self):
try:
snapshots = api.volume_snapshot_list(self.request)
except:
snapshots = []
exceptions.handle(self.request, _("Unable to retrieve "
"volume snapshots."))
return snapshots
class DetailView(tabs.TabView):
tab_group_class = SnapshotDetailTabs
template_name = 'project/images_and_snapshots/snapshots/detail.html'
| 1ukash/horizon | horizon/dashboards/project/images_and_snapshots/views.py | Python | apache-2.0 | 3,222 |
# Copyright (c) 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
import mock
from requests import models
import uuid
LOG = logging.getLogger(__name__)
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanAPITestCase(test.TestCase):
'''DellSCSanAPITestCase
Class to test the Storage Center API using Mock.
'''
SC = {u'IPv6ManagementIPPrefix': 128,
u'connectionError': u'',
u'instanceId': u'64702',
u'scSerialNumber': 64702,
u'dataProgressionRunning': False,
u'hostOrIpAddress': u'192.168.0.80',
u'userConnected': True,
u'portsBalanced': True,
u'managementIp': u'192.168.0.80',
u'version': u'6.5.1.269',
u'location': u'',
u'objectType': u'StorageCenter',
u'instanceName': u'Storage Center 64702',
u'statusMessage': u'',
u'status': u'Up',
u'flashOptimizedConfigured': False,
u'connected': True,
u'operationMode': u'Normal',
u'userName': u'Admin',
u'nonFlashOptimizedConfigured': True,
u'name': u'Storage Center 64702',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'serialNumber': 64702,
u'raidRebalanceRunning': False,
u'userPasswordExpired': False,
u'contact': u'',
u'IPv6ManagementIP': u'::'}
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
INACTIVE_VOLUME = \
{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
# ScServer where deletedAllowed=False (not allowed to be deleted)
SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': False,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem':
{u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
SCSERVERS = [{u'scName': u'Storage Center 64702',
u'volumeCount': 5,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack4',
u'instanceId': u'64702.1',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Up',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0,
u'name': u'openstack4',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.3',
u'instanceName': u'Other Multipath',
u'objectType': u'ScServerOperatingSystem'}},
{u'scName': u'Storage Center 64702',
u'volumeCount': 1,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack5',
u'instanceId': u'64702.2',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Up',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0, u'name': u'openstack5',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.2',
u'instanceName': u'Other Singlepath',
u'objectType': u'ScServerOperatingSystem'}}]
# ScServers list where status = Down
SCSERVERS_DOWN = \
[{u'scName': u'Storage Center 64702',
u'volumeCount': 5,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack4',
u'instanceId': u'64702.1',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Down',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0,
u'name': u'openstack4',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.3',
u'instanceName': u'Other Multipath',
u'objectType': u'ScServerOperatingSystem'}}]
MAP_PROFILES = [{u'instanceId': u'64702.2941',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 3,
u'instanceName': u'6025-47',
u'lunRequested': u'N/A'}]
MAP_PROFILE = {u'instanceId': u'64702.2941',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 3,
u'instanceName': u'6025-47',
u'lunRequested': u'N/A'}
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
# Multiple mappings to test find_iscsi_properties with multiple portals
MAPPINGS_MULTI_PORTAL = \
[{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
MAPPINGS_READ_ONLY = \
[{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': True,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName':
u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
FC_MAPPINGS = [{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7639.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218607',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64703.27.73',
u'instanceName':
u'21000024FF30441C-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7639',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7640.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.27.78',
u'instanceName': u'21000024FF30441D-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7640',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7638.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.28.76',
u'instanceName': u'21000024FF30441D-5000D31000FCBE3E',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736126.60',
u'instanceName': u'5000D31000FCBE3E',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7638',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
RPLAYS = [{u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-5',
u'description': u'Manually Created',
u'parent': {u'instanceId': u'64702.6025.4',
u'instanceName': u'64702-6025-4',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.5',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:55 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:55 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7889,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:55 PM',
u'size': u'0.0 Bytes'},
{u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-4',
u'description': u'Cinder Test Replay012345678910',
u'parent': {u'instanceId': u'64702.6025.3',
u'instanceName': u'64702-6025-3',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.4',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:47 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:47 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7869,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:47 PM',
u'size': u'0.0 Bytes'}]
TST_RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-4',
u'description': u'Cinder Test Replay012345678910',
u'parent': {u'instanceId': u'64702.6025.3',
u'instanceName': u'64702-6025-3',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.4',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:47 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:47 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7869,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:47 PM',
u'size': u'0.0 Bytes'}
FLDR = {u'status': u'Up',
u'instanceName': u'opnstktst',
u'name': u'opnstktst',
u'parent':
{u'instanceId': u'64702.0',
u'instanceName': u'Volumes',
u'objectType': u'ScVolumeFolder'},
u'instanceId': u'64702.43',
u'scName': u'Storage Center 64702',
u'notes': u'Folder for OpenStack Cinder Driver',
u'scSerialNumber': 64702,
u'parentIndex': 0,
u'okToDelete': True,
u'folderPath': u'',
u'root': False,
u'statusMessage': u'',
u'objectType': u'ScVolumeFolder'}
SVR_FLDR = {u'status': u'Up',
u'instanceName': u'devstacksrv',
u'name': u'devstacksrv',
u'parent': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'instanceId': u'64702.4',
u'scName': u'Storage Center 64702',
u'notes': u'Folder for OpenStack Cinder Driver',
u'scSerialNumber': 64702,
u'parentIndex': 0,
u'okToDelete': False,
u'folderPath': u'',
u'root': False,
u'statusMessage': u'',
u'objectType': u'ScServerFolder'}
ISCSI_HBA = {u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 1,
u'name': u'iqn.1993-08.org.debian:01:52332b70525',
u'connectivity': u'Down',
u'instanceId': u'64702.3786433166',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server':
{u'instanceId': u'64702.38',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:52332b70525',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'Iscsi',
u'instanceName': u'iqn.1993-08.org.debian:01:52332b70525',
u'objectType': u'ScServerHba'}
FC_HBAS = [{u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 2,
u'name': u'21000024FF30441C',
u'connectivity': u'Up',
u'instanceId': u'64702.3282218607',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
{u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 3,
u'name': u'21000024FF30441D',
u'connectivity': u'Partial',
u'instanceId': u'64702.3282218606',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'}]
FC_HBA = {u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 3,
u'name': u'21000024FF30441D',
u'connectivity': u'Partial',
u'instanceId': u'64702.3282218606',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'}
SVR_OS_S = [{u'allowsLunGaps': True,
u'product': u'Red Hat Linux',
u'supportsActiveMappingDeletion': True,
u'version': u'6.x',
u'requiresLunZero': False,
u'scName': u'Storage Center 64702',
u'virtualMachineGuest': True,
u'virtualMachineHost': False,
u'allowsCrossTransportMapping': False,
u'objectType': u'ScServerOperatingSystem',
u'instanceId': u'64702.38',
u'lunCanVaryAcrossPaths': False,
u'scSerialNumber': 64702,
u'maximumVolumeSize': u'0.0 Bytes',
u'multipath': True,
u'instanceName': u'Red Hat Linux 6.x',
u'supportsActiveMappingCreation': True,
u'name': u'Red Hat Linux 6.x'}]
ISCSI_FLT_DOMAINS = [{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}]
# For testing find_iscsi_properties where multiple portals are found
ISCSI_FLT_DOMAINS_MULTI_PORTALS = \
[{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'},
{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.25',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}]
ISCSI_FLT_DOMAIN = {u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}
CTRLR_PORT = {u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'WWN': u'5000D31000FCBE06',
u'name': u'5000D31000FCBE06',
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736070.51',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': False,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.4.3',
u'instanceName': u'Domain 1',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE06',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
ISCSI_CTRLR_PORT = {u'preferredParent':
{u'instanceId': u'64702.5764839588723736074.69',
u'instanceName': u'5000D31000FCBE0A',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'10.23.8.235',
u'WWN': u'5000D31000FCBE43',
u'name': u'5000D31000FCBE43',
u'parent':
{u'instanceId': u'64702.5764839588723736074.69',
u'instanceName': u'5000D31000FCBE0A',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736131.91',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'Iscsi',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.6.5',
u'instanceName': u'iSCSI 10G 2',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE43',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
FC_CTRLR_PORT = {u'preferredParent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'WWN': u'5000D31000FCBE36',
u'name': u'5000D31000FCBE36',
u'parent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736118.50',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.1.0',
u'instanceName': u'Domain 0',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE36',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
STRG_USAGE = {u'systemSpace': u'7.38197504E8 Bytes',
u'freeSpace': u'1.297659461632E13 Bytes',
u'oversubscribedSpace': u'0.0 Bytes',
u'instanceId': u'64702',
u'scName': u'Storage Center 64702',
u'savingVsRaidTen': u'1.13737990144E11 Bytes',
u'allocatedSpace': u'1.66791217152E12 Bytes',
u'usedSpace': u'3.25716017152E11 Bytes',
u'configuredSpace': u'9.155796533248E12 Bytes',
u'alertThresholdSpace': u'1.197207956992E13 Bytes',
u'availableSpace': u'1.3302310633472E13 Bytes',
u'badSpace': u'0.0 Bytes',
u'time': u'02/02/2015 02:23:39 PM',
u'scSerialNumber': 64702,
u'instanceName': u'Storage Center 64702',
u'storageAlertThreshold': 10,
u'objectType': u'StorageCenterStorageUsage'}
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
WWN = u'21000024FF30441C'
WWNS = [u'21000024FF30441C',
u'21000024FF30441D']
FLDR_PATH = 'StorageCenter/ScVolumeFolder/'
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates created
response_created = models.Response()
response_created.status_code = 201
response_created.reason = u'created'
RESPONSE_201 = response_created
# Create a Response object that indicates a failure (no content)
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
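# A minimal sketch, not part of the original tests: the three canned
# responses above could also be built by one small helper. The name
# _make_response is hypothetical and only illustrates the pattern used here.
@staticmethod
def _make_response(status_code, reason):
    # Build a requests Response carrying just the fields the driver
    # code inspects in these tests.
    response = models.Response()
    response.status_code = status_code
    response.reason = reason
    return response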
def setUp(self):
super(DellSCSanAPITestCase, self).setUp()
# Configuration is a mock. A mock is pretty much a blank
# slate, so rather than relying on any mock defaults we explicitly
# set the handful of driver config values the tests below need.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
# Set up the StorageCenterApi
self.scapi = dell_storagecenter_api.StorageCenterApi(
self.configuration.san_ip,
self.configuration.dell_sc_api_port,
self.configuration.san_login,
self.configuration.san_password)
self.volid = str(uuid.uuid4())
self.volume_name = "volume" + self.volid
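# Note on the test signatures below: every test method receives
# mock_close_connection, mock_open_connection and mock_init as trailing
# arguments. These presumably come from mock.patch.object decorators applied
# on the test class earlier in the module, stubbing out StorageCenterApi's
# __init__, open_connection and close_connection so no real Storage Center
# connection is attempted.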
def test_path_to_array(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._path_to_array(u'folder1/folder2/folder3')
expected = [u'folder1', u'folder2', u'folder3']
self.assertEqual(expected, res, 'Unexpected folder path')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=SC)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_sc(self,
mock_get,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_sc(64702)
mock_get.assert_called_once_with('StorageCenter/StorageCenter')
mock_get_result.assert_called()
self.assertEqual(u'64702', res, 'Unexpected SSN')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=None)
def test_find_sc_failure(self,
mock_get_result,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_sc, 12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_folder(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 12345, '',
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_folder_with_parent(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where parent folder name is specified
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 12345, 'parentFolder',
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_folder_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 12345, '',
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Test Create folder - None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path(self,
mock_path_to_array,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path_create_fldr(self,
mock_path_to_array,
mock_find_folder,
mock_create_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where folder is not found and must be created
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called()
mock_create_folder.assert_called()
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path_failure(self,
mock_path_to_array,
mock_find_folder,
mock_create_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where folder is not found, must be created
# and creation fails
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called()
mock_create_folder.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=u'devstackvol/fcvm/')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_folder(self,
mock_post,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_result.assert_called()
self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=u'devstackvol/fcvm/')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_folder_multi_fldr(self,
mock_post,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case for folder path with multiple folders
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder', 12345,
u'testParentFolder/opnstktst')
mock_post.assert_called()
mock_get_result.assert_called()
self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_folder_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder', 12345,
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Test find folder - None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder_path',
return_value=FLDR)
def test_create_volume_folder_path(self,
mock_create_vol_fldr_path,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_volume_folder_path(
12345,
self.configuration.dell_sc_volume_folder)
mock_create_vol_fldr_path.assert_called_once_with(
'StorageCenter/ScVolumeFolder',
12345,
self.configuration.dell_sc_volume_folder)
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=FLDR)
def test_find_volume_folder(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_volume_folder(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScVolumeFolder/GetList',
12345,
self.configuration.dell_sc_volume_folder)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCSERVERS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_init_volume(self,
mock_post,
mock_get_json,
mock_map_volume,
mock_unmap_volume,
mock_close_connection,
mock_open_connection,
mock_init):
self.scapi._init_volume(self.VOLUME)
mock_map_volume.assert_called()
mock_unmap_volume.assert_called()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_init_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScServer list fails
self.scapi._init_volume(self.VOLUME)
mock_post.assert_called()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCSERVERS_DOWN)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_init_volume_servers_down(self,
mock_post,
mock_get_json,
mock_map_volume,
mock_unmap_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScServer Status = Down
self.scapi._init_volume(self.VOLUME)
mock_map_volume.assert_called()
mock_unmap_volume.assert_called()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_volume(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_json.assert_called()
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_vol_and_folder(self,
mock_post,
mock_find_volume_folder,
mock_create_vol_folder_path,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling create_volume where volume folder has to be created
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_json.assert_called()
mock_create_vol_folder_path.assert_called_once_with(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_vol_folder_fail(self,
mock_post,
mock_find_volume_folder,
mock_create_vol_folder_path,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling create_volume where volume folder does not exist and
# fails to be created
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_post.assert_called()
mock_get_json.assert_called()
mock_create_vol_folder_path.assert_called_once_with(
12345,
self.configuration.dell_sc_volume_folder)
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_volume(
self.volume_name,
1,
12345,
self.configuration.dell_sc_volume_folder)
mock_find_volume_folder.assert_called_once_with(
12345, self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_volume_by_name(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume by name
res = self.scapi.find_volume(12345,
self.volume_name)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
# Test case to find volume by InstanceId
def test_find_volume_by_instanceid(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_volume(12345,
None,
'64702.3494')
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected volume')
def test_find_volume_no_name_or_instance(self,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling find_volume with no name or instanceid
res = self.scapi.find_volume(12345)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_volume_not_found(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling find_volume with result of no volume found
res = self.scapi.find_volume(12345,
self.volume_name)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=True)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
def test_delete_volume(self,
mock_find_volume,
mock_delete,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.delete_volume(12345,
self.volume_name)
mock_delete.assert_called()
mock_find_volume.assert_called_once_with(12345, self.volume_name, None)
mock_get_json.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
def test_delete_volume_failure(self,
mock_find_volume,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.delete_volume, 12345, self.volume_name)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
def test_delete_volume_no_vol_found(self,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume to be deleted does not exist
res = self.scapi.delete_volume(12345,
self.volume_name)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder_path',
return_value=SVR_FLDR)
def test_create_server_folder_path(self,
mock_create_svr_fldr_path,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_server_folder_path(
12345,
self.configuration.dell_sc_server_folder)
mock_create_svr_fldr_path.assert_called_once_with(
'StorageCenter/ScServerFolder',
12345,
self.configuration.dell_sc_server_folder)
self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=SVR_FLDR)
def test_find_server_folder(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_server_folder(
12345,
self.configuration.dell_sc_server_folder)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScServerFolder/GetList',
12345,
self.configuration.dell_sc_server_folder)
self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_add_hba(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.IQN,
False)
mock_post.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_add_hba_fc(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.WWN,
True)
mock_post.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_add_hba_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.IQN,
False)
mock_post.assert_called()
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SVR_OS_S)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serveros(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_serveros(12345, 'Red Hat Linux 6.x')
mock_get_json.assert_called()
mock_post.assert_called()
self.assertEqual('64702.38', res, 'Wrong InstanceId')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SVR_OS_S)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serveros_not_found(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test requesting a Server OS that will not be found
res = self.scapi._find_serveros(12345, 'Non existent OS')
mock_get_json.assert_called()
mock_post.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_serveros_failed(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_serveros(12345, 'Red Hat Linux 6.x')
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=FC_HBA)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
def test_create_server_multiple_hbas(self,
mock_create_server,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server_multiple_hbas(
12345,
self.configuration.dell_sc_server_folder,
self.WWNS)
mock_create_server.assert_called()
mock_add_hba.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_find_serveros.assert_called()
mock_find_server_folder.assert_called()
mock_first_result.assert_called()
mock_add_hba.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_os_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_find_serveros.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_server_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_fldr_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_create_svr_fldr_path,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_find_server_folder.assert_called()
mock_create_svr_fldr_path.assert_called()
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_server_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_server_failure(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_create_svr_fldr_path,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_server_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_create_svr_fldr_path,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test create server where _first_result is None
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_delete_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_addhba_fail(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_delete_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Tests create server where add hba fails
res = self.scapi.create_server(
12345,
self.configuration.dell_sc_server_folder,
self.IQN,
False)
mock_delete_server.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_server(self,
mock_post,
mock_find_serverhba,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_server(12345,
self.IQN)
mock_find_serverhba.assert_called()
mock_first_result.assert_called()
self.assertIsNotNone(res, 'Expected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_server_no_hba(self,
mock_post,
mock_find_serverhba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer HBA does not exist with the specified IQN
# or WWN
res = self.scapi.find_server(12345,
self.IQN)
mock_find_serverhba.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_server_failure(self,
mock_post,
mock_find_serverhba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer does not exist with the specified
# ScServerHba
res = self.scapi.find_server(12345,
self.IQN)
mock_find_serverhba.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serverhba(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_server(12345,
self.IQN)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertIsNotNone(res, 'Expected ScServerHba')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_serverhba_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer does not exist with the specified
# ScServerHba
res = self.scapi.find_server(12345,
self.IQN)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domains(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_domains(u'64702.5764839588723736074.69')
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(
self.ISCSI_FLT_DOMAINS, res, 'Unexpected ScIscsiFaultDomain')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_domains_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScControllerPort FaultDomainList fails
res = self.scapi._find_domains(u'64702.5764839588723736074.69')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domain(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_domain(u'64702.5764839588723736074.69',
u'192.168.0.21')
mock_get.assert_called()
mock_get_json.assert_called()
self.assertIsNotNone(res, 'Expected ScIscsiFaultDomain')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_domain_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScControllerPort FaultDomainList fails
res = self.scapi._find_domain(u'64702.5764839588723736074.69',
u'192.168.0.21')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domain_not_found(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where domainip does not equal any WellKnownIpAddress
# of the fault domains
res = self.scapi._find_domain(u'64702.5764839588723736074.69',
u'192.168.0.22')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=FC_HBAS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_fc_initiators(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_fc_initiators(self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertIsNotNone(res, 'Expected WWN list')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_fc_initiators_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScServer HbaList fails
res = self.scapi._find_fc_initiators(self.SCSERVER)
self.assertListEqual([], res, 'Expected empty list')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_volume_count(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_volume_count(self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(len(self.MAPPINGS), res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_get_volume_count_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case of where get of ScServer MappingList fails
res = self.scapi.get_volume_count(self.SCSERVER)
mock_get.assert_called()
self.assertEqual(-1, res, 'Mapping count not -1')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_volume_count_no_volumes(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_volume_count(self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(len([]), res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_mappings(self.VOLUME)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.MAPPINGS, res, 'Mapping mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings_inactive_vol(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test getting volume mappings on inactive volume
res = self.scapi._find_mappings(self.INACTIVE_VOLUME)
mock_get.assert_called()
self.assertEqual([], res, 'No mappings expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_mappings_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case of where get of ScVolume MappingList fails
res = self.scapi._find_mappings(self.VOLUME)
mock_get.assert_called()
self.assertEqual([], res, 'Mapping count not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings_no_mappings(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScVolume has no mappings
res = self.scapi._find_mappings(self.VOLUME)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual([], res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_controller_port(self,
mock_get,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_controller_port(u'64702.5764839588723736070.51')
mock_get.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.CTRLR_PORT, res, 'ScControllerPort mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_controller_port_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScVolume MappingList fails
res = self.scapi._find_controller_port(self.VOLUME)
mock_get.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=FC_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_fc_initiators',
return_value=WWNS)
def test_find_wwns(self,
mock_find_fc_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
mock_find_fc_initiators.assert_called()
mock_find_mappings.assert_called()
mock_find_controller_port.assert_called()
# The _find_controller_port is Mocked, so all mapping pairs
# will have the same WWN for the ScControllerPort
itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'],
u'21000024FF30441D':
[u'5000D31000FCBE36', u'5000D31000FCBE36']}
self.assertEqual(1, lun, 'Incorrect LUN')
self.assertIsNotNone(wwns, 'WWNs is None')
self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_fc_initiators',
return_value=FC_HBAS)
def test_find_wwns_no_mappings(self,
mock_find_fc_initiators,
mock_find_mappings,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScMapping(s)
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
mock_find_fc_initiators.assert_called()
mock_find_mappings.assert_called()
self.assertEqual(None, lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_fc_initiators',
return_value=WWNS)
def test_find_wwns_no_ctlr_port(self,
mock_find_fc_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScControllerPort is none
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
mock_find_fc_initiators.assert_called()
mock_find_mappings.assert_called()
mock_find_controller_port.assert_called()
self.assertEqual(None, lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_mappings(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_by_address(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find iSCSI mappings by IP Address & port
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.0.21', 3260)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_by_address_not_found(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find iSCSI mappings by IP Address & port are not found
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.1.21', 3260)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
def test_find_iscsi_properties_no_mapping(self,
mock_find_mappings,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScMapping(s)
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Expected empty Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_no_domain(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScFaultDomain(s)
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Expected empty Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
def test_find_iscsi_properties_no_ctrl_port(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScFaultDomain(s)
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
self.assertEqual(expected, res, 'Expected empty Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_READ_ONLY)
def test_find_iscsi_properties_ro(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where Read Only mappings are found
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'ro',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_MULTI_PORTAL)
def test_find_iscsi_properties_multi_portals(self,
mock_find_mappings,
mock_find_domain,
mock_find_ctrl_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are multiple portals
res = self.scapi.find_iscsi_properties(self.VOLUME)
mock_find_mappings.assert_called()
mock_find_domain.assert_called()
mock_find_ctrl_port.assert_called()
expected = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals':
[u'192.168.0.21:3260', u'192.168.0.25:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=MAP_PROFILE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_map_volume(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_map_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where mapping volume to server fails
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
mock_post.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_unmap_volume(self,
mock_get,
mock_get_json,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
mock_delete.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_unmap_volume_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_unmap_volume_no_map_profile(self,
mock_get,
mock_get_json,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
mock_delete.assert_called()
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_unmap_volume_del_fail(self,
mock_get,
mock_get_json,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
mock_get.assert_called()
mock_get_json.assert_called()
mock_delete.assert_called()
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=STRG_USAGE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_storage_usage(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_storage_usage(64702)
mock_get.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.STRG_USAGE, res, 'Unexpected ScStorageUsage')
def test_get_storage_usage_no_ssn(self,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where SSN is none
res = self.scapi.get_storage_usage(None)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
# Test case where get of Storage Usage fails
def test_get_storage_usage_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_storage_usage(64702)
mock_get.assert_called()
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
60)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_volume')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_inact_vol(self,
mock_post,
mock_init_volume,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the specified volume is inactive
res = self.scapi.create_replay(self.INACTIVE_VOLUME,
'Test Replay',
60)
mock_post.assert_called()
mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME)
mock_first_result.assert_called()
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_no_expire(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
0)
mock_post.assert_called()
mock_first_result.assert_called()
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_no_volume(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no ScVolume is specified
res = self.scapi.create_replay(None,
'Test Replay',
60)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_replay_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where create ScReplay fails
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
60)
mock_post.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_replay(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_replay(self.VOLUME,
u'Cinder Test Replay012345678910')
mock_post.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.TST_RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_replay_no_replays(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no replays are found
res = self.scapi.find_replay(self.VOLUME,
u'Cinder Test Replay012345678910')
mock_post.assert_called()
mock_get_json.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_replay_failure(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where None is returned for replays
res = self.scapi.find_replay(self.VOLUME,
u'Cinder Test Replay012345678910')
mock_post.assert_called()
mock_get_json.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_replay(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
mock_post.assert_called()
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_replay_no_replay(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where specified ScReplay does not exist
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
mock_post.assert_called()
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=TST_RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_delete_replay_failure(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where delete ScReplay results in an error
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
mock_post.assert_called()
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume(self,
mock_post,
mock_find_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume_create_fldr(self,
mock_post,
mock_find_volume_folder,
mock_create_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume folder does not exist and must be created
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_create_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_volume_folder_path',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume_no_vol_fldr(self,
mock_post,
mock_find_volume_folder,
mock_create_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume folder does not exist and cannot be created
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_create_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
mock_first_result.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_view_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where view volume create fails
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.TST_RPLAY)
mock_post.assert_called()
mock_find_volume_folder.assert_called_once_with(
64702,
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=RPLAY)
def test_create_cloned_volume(self,
mock_create_replay,
mock_create_view_volume,
mock_close_connection,
mock_open_connection,
mock_init):
vol_name = u'Test_create_clone_vol'
res = self.scapi.create_cloned_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.VOLUME)
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
mock_create_view_volume.assert_called_once_with(
vol_name,
self.configuration.dell_sc_volume_folder,
self.RPLAY)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=None)
def test_create_cloned_volume_failure(self,
mock_create_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where create cloned volumes fails because create_replay
# fails
vol_name = u'Test_create_clone_vol'
res = self.scapi.create_cloned_volume(
vol_name,
self.configuration.dell_sc_volume_folder,
self.VOLUME)
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_expand_volume(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.expand_volume(self.VOLUME, 550)
mock_post.assert_called()
mock_get_json.assert_called()
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_expand_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.expand_volume(self.VOLUME, 550)
mock_post.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_server(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._delete_server(self.SCSERVER)
mock_delete.assert_called()
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_server_del_not_allowed(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where delete of ScServer not allowed
res = self.scapi._delete_server(self.SCSERVER_NO_DEL)
mock_delete.assert_called()
self.assertIsNone(res, 'Expected None')
class DellSCSanAPIConnectionTestCase(test.TestCase):
'''DellSCSanAPIConnectionTestCase
Class to test the Storage Center API connection using Mock.
'''
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates a failure (no content)
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
def setUp(self):
super(DellSCSanAPIConnectionTestCase, self).setUp()
# Configuration is a mock. A mock is pretty much a blank
        # slate. I believe mocks done in setUp are not happy-time
# mocks. So we just do a few things like driver config here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
# Set up the StorageCenterApi
self.scapi = dell_storagecenter_api.StorageCenterApi(
self.configuration.san_ip,
self.configuration.dell_sc_api_port,
self.configuration.san_login,
self.configuration.san_password)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_open_connection(self,
mock_post):
self.scapi.open_connection()
mock_post.assert_called()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_open_connection_failure(self,
mock_post):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.open_connection)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_close_connection(self,
mock_post):
self.scapi.close_connection()
mock_post.assert_called()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_close_connection_failure(self,
mock_post):
self.scapi.close_connection()
mock_post.assert_called()
| Akrog/cinder | cinder/tests/test_dellscapi.py | Python | apache-2.0 | 155,946 |
import click
@click.command('config', short_help='Display remote client config')
@click.pass_obj
def cli(obj):
"""Display client config downloaded from API server."""
for k, v in obj.items():
if isinstance(v, list):
v = ', '.join(v)
click.echo(f'{k:20}: {v}')
| alerta/python-alerta-client | alertaclient/commands/cmd_config.py | Python | apache-2.0 | 298 |
#!/Users/vishnu/anaconda/bin/python
import random
import sys
"""class schema:
files=[]
def __init__(self):
pass
def addFile(self,file):
self.files.append(file)
def setForeignKey(self,primaryFile,theOtherOne):
pass"""
class JoinReq:
def __init__(self,R,S,m,n,fing):
        self.cost=0
        self.first_req=True
tC,self.t1=R.getFirst(m)
#self.cost += tC
tC,self.t2=S.getFirst(n)
#self.cost += tC
        self.first_req=False
self.R=R
self.S=S
self.m=m
self.n=n
self.fing=fing
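    # pull() is an iterator-style interface: each call returns the next joined
    # (t1, t2) pair and the string "eoo" once the inputs are exhausted. With
    # fing=False it runs a plain nested-loop join; with fing=True it exploits
    # the sort order of the join columns and only rewinds S (a getFirst call
    # whose cost is the current finger position) when R's key steps backwards.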
def pull(self):
if self.fing==False:
temp=""
while self.t1 is not None:
#print str(t1[m]) + "=" + str(t2[n])
#print "x"
while self.t2 is not None:
#print str(self.t1[self.m]) + "=" + str(self.t2[self.n])
if self.t1[self.m]==self.t2[self.n]:
#self.emit((self.t1,self.t2))
temp= (self.t1,self.t2)
self.t2=self.S.getNext(self.n)
self.cost+=1
return temp
self.t2=self.S.getNext(self.n)
self.cost+=1
#print "vishnu"
self.t1=self.R.getNext(self.m)
self.cost+=1
#print str(t1) + "xx"
#if t2==None:
tC,self.t2=self.S.getFirst(self.n)
self.cost+=tC
return "eoo"
else:
"""savedLastKey=-1
while self.t1 is not None:
if self.t1>=savedLastKey:
while self.t2 is not None:
if self.t1[self.m]==self.t2[self.n]:
#self.emit((self.t1,self.t2))
temp= (self.t1,self.t2)
self.t2=self.S.getNext(self.n)
self.cost+=1
return temp
self.t2=self.S.getNext(self.n)
self.cost+=1
else:
tC,self.t2=self.S.getFirst(self.n)
self.cost+=tC
while self.t2 is not None:
if self.t1[self.m]==self.t2[self.n]:
#self.emit((self.t1,self.t2))
temp= (self.t1,self.t2)
self.t2=self.S.getNext(self.n)
self.cost+=1
return temp
self.t2=self.S.getNext(self.n)
self.cost+=1
savedLastKey=self.t1
self.t1=self.R.getNext(self.m)
self.cost+=1
return "eoo" """
savedLastKey=-1
while self.t1 is not None:
while self.t1 is not None:
while self.t2 is not None and self.t1[self.m]>=self.t2[self.n]:
#print str(self.t1[self.m]) + "=" + str(self.t2[self.n])
if self.t1[self.m]==self.t2[self.n]:
#self.emit((self.t1,self.t2))
temp= (self.t1,self.t2)
self.t2=self.S.getNext(self.n)
self.cost+=1
return temp
self.t2=self.S.getNext(self.n)
self.cost+=1
if self.t2 is None:
#print "t2 go non"
while self.t1 is not None:
self.t1=self.R.getNext(self.m)
self.cost+=1
if savedLastKey>self.t1[self.m]:
tC,self.t2=self.S.getFirst(self.n)
#print tC
self.cost+=tC
break
if self.t2[self.n]>self.t1[self.m]:
break
while self.t2 is not None:
while self.t1 is not None and self.t2[self.n]>=self.t1[self.m]:
#print str(self.t1[self.m]) + "=" + str(self.t2[self.n])
if self.t1[self.m]==self.t2[self.n]:
#self.emit((self.t1,self.t2))
temp= (self.t1,self.t2)
self.t2=self.S.getNext(self.n)
self.cost+=1
return temp
savedLastKey=self.t1[self.m]
self.t1=self.R.getNext(self.m)
self.cost+=1
if self.t1 is None:
return "eoo"
if savedLastKey>self.t1[self.m]:
tC,self.t2=self.S.getFirst(self.n)
#print tC
self.cost+=tC
#print self.t2
if self.t1[self.m]>self.t2[self.n]:
break
return "eoo"
def getCost(self):
return self.cost
class Xf:
#max=25
#data={}
#stats size,columns,runs,fingers,pkey,max,range
#stats=(size,columns,runs,fingers,pkey,max,range)
#stats={}
#stats = {}
def __init__(self,name):
self.stats={}
self.max=25
self.keyCol=None
self.stats["Name"]=name
self.data={}
self.setStats(0,0,0,0,0,0,0)
pass
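    # stats fields: size (values generated per column), columns, runs (number
    # of sorted runs per column), fingers (per-column cursor position, -1 means
    # no finger is kept for that column), ordered (per-column flag forcing a
    # full sort), pkey (index of the key column, sampled without replacement)
    # and max (upper bound of the value domain).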
def setStats(self,size,columns,runs,fingers,ordered,pkey,max): #set the status values
#print self
#print type(self.stats)
self.stats["size"]=size
self.stats["keyCol"]=pkey
self.stats["max"]=max
self.stats["columns"]=columns
self.stats["runs"]=runs
self.stats["cursors"]=[0 for x in range(columns)]
self.keyCol=self.stats["keyCol"]
self.max=self.stats["max"]
self.stats["fingers"]=fingers
self.stats["ordered"]=ordered
self.fingers=fingers
pass
def sortCol(self):
pass
def reset(self):
self.stats["fingers"]=[0 if x!=-1 else x for x in self.stats["fingers"]]
def getSize(self):
return int(self.stats["size"])
def getRuns(self,col):
return int(self.stats["runs"][col])
    def getFirst(self,col):
        # rebuild the first tuple and rewind the finger on the requested
        # column; the rewind cost is the finger position before the reset
        tuple1 =[]
        for c in range(self.stats["columns"]):
            tuple1.append(self.data[str(c)][0])
            #print str(self.stats["fingers"][c]) + "*"
        tCost = self.stats["fingers"][col]
        #print tCost
        self.stats["fingers"][col]=0
        #if self.stats["Name"] == "s":
            #print "getFirst " + self.stats["Name"] + str(tuple1[col])
        return tCost, tuple1
        pass
def getNext(self,col):
#print self
fingerPos=self.stats["fingers"][col]
#print str(fingerPos) + "-" + str(len(self.data[str(0)])-2)
if int(fingerPos)>=(len(self.data[str(col)])-2):
#self.stats["fingers"][col]=0
#print "yo"
return None
if self.stats["fingers"][col]!=-1 :
self.stats["fingers"][col]+=1
#print self.stats["fingers"][col]
tuple1 =[]
for col in range(self.stats["columns"]):
tuple1.append(self.data[str(col)][fingerPos])
#if self.stats["Name"] == "s":
#print "getNext " + self.stats["Name"]+ str(tuple1[col])
return tuple1
pass
def getFinger(self,col):
return self.fingerPos
pass
def emit(self,x):
#print "yo"
#print x
pass
def eJoin(self,S,m,n):
cost = 0
tC,t1=self.getFirst(m)
cost += tC
tC,t2=S.getFirst(n)
cost += tC
while t1 is not None:
#print str(t1[m]) + "=" + str(t2[n])
#print "x"
while t2 is not None:
#print str(t1[m]) + "=" + str(t2[n])
if t1[m]==t2[n]:
self.emit((t1,t2))
t2=S.getNext(n)
cost+=1
#print "vishnu"
t1=self.getNext(m)
cost+=1
#print str(t1) + "xx"
#if t2==None:
tC,t2=S.getFirst(n)
cost+=tC
return cost
pass
def eJoin_pull(self,S,m,n):
cost = 0
tC,t1=self.getFirst(m)
cost += tC
tC,t2=S.getFirst(n)
cost += tC
while t1 is not None:
#print str(t1[m]) + "=" + str(t2[n])
#print "x"
while t2 is not None:
#print str(t1[m]) + "=" + str(t2[n])
if t1[m]==t2[n]:
self.emit((t1,t2))
t2=S.getNext(n)
cost+=1
#print "vishnu"
t1=self.getNext(m)
cost+=1
#print str(t1) + "xx"
#if t2==None:
tC,t2=S.getFirst(n)
cost+=tC
return cost
pass
#def __init__(self):
# self.data={}
# pass
def __repr__(self):
t1=""
for key in self.data.keys():
t1 = t1 + str(key) + " : " + str(self.data[key]) +"\n"
t1= str(t1) + "\nprimary key: " + str(self.keyCol)
return t1
def setConstraints(self,key,max): #there is some reduntant code here. Remove
self.stats["keyCol"]=key
self.keyCol=key
self.max=max
self.stats["max"]=max
pass
def printStats(self):
print self.stats
def replaceDupandSum(self,list1,list2):
counter = 0
for i in range(len(list1)):
counter=0
for j in range(len(list2)):
if list2[j]==list1[i]:
#print "xx" + str(list2[j])
#counter+=1
#if counter>1:
list2[j]=(list2[j]+list2[j+1])/2
return list1+list2
pass
def FormData(self):
""" for col in range(self.cols):
if col == self.keyCol:
#print "key" + str(col)
#print runs
for r in range(self.runs[col]):
temp=sorted(random.sample(range(self.max),size/runs[col]))
#print temp
self.data[str(col)]=self.replaceDupandSum(self.data.get(str(col),[]),temp)
#self.data[str(col)]=set(self.data[str(col)])
#print self.data[str(col)]
else:
for r in range(self.runs[col]):
temp=sorted([random.randrange(self.max) for x in range(size/runs[col])])
self.data[str(col)]=self.data.get(str(col),[])+temp"""
self.Generate(self.stats["columns"],self.stats["runs"],self.stats["size"])
def Generate(self,cols,runs,size):
for col in range(cols):
if col == self.keyCol:
#print "key" + str(col)
print runs
for r in range(runs[col]):
temp=sorted(random.sample(range(self.max),size/runs[col]))
#print temp
self.data[str(col)]=self.replaceDupandSum(self.data.get(str(col),[]),temp)
#self.data[str(col)]=set(self.data[str(col)])
#print self.data[str(col)]
else:
for r in range(runs[col]):
temp=sorted([random.randrange(self.max) for x in range(size/runs[col])])
self.data[str(col)]=self.data.get(str(col),[])+temp
if self.stats["ordered"][col]==True:
self.data[str(col)]=sorted(self.data[str(col)])
    def write2File(self,fileName):
        fp = open(fileName,'w')
        for col in range(self.stats["columns"]):
            #print self.data[str(col)]
            stringD=""
            for x in self.data[str(col)]:
                stringD=stringD+" "+ str(x)
            fp.write(stringD+"\n")
        fp.close()
        pass
    def readFile(self,fileName):
        lines = open(fileName).read().splitlines()
        for x in range(self.stats["columns"]):
            self.data[str(x)]=[int(v) for v in lines[x].split()]
        pass
def nJoin(R,S,m,n):
t1=R.getFirst(m)
t2=S.getFirst(n)
while t1 is not None:
print str(t1[m]) + "=" + str(t2[n])
#print "x"
while t2 is not None:
print str(t1[m]) + "=" + str(t2[n])
if t1[m]==t2[n]:
R.emit((t1,t2))
t2=S.getNext(n)
print "vishnu"
t1=R.getNext(m)
print str(t1) + "xx"
#if t2==None:
t2=S.getFirst(n)
pass
#Generate(3,[3,3,3],9)
"""inst= file()
if len(sys.argv)>1:
cols = int(sys.argv[1])
runs = [int(x) for x in sys.argv[2:(len(sys.argv)-1)]]
size = int(sys.argv[len(sys.argv)-1])
#print inst.replaceDupandSum([1,6,9,12],[2,5,6,11])
inst.setConstraints(0,200000)
inst.Generate(cols,runs,size)
inst.write2File("file.txt")
"""
#inst2=file()
#inst2.readFile("file.txt")
#print inst2
"""
inst3=Xf("r")
inst3.setStats(10,2,(2,3),[-1,0],0,40)
inst3.FormData()
inst4=Xf("s")
inst4.setStats(20,2,(2,3),[-1,0],0,40)
inst4.FormData()
print inst3
print inst4
"""
#print inst3.getFirst(1)
#print inst4.getFirst(1)
#print inst3.getNext(1)
#print inst4.getNext(1)
#print inst3.getNext(1)
#print inst4.getNext(1)
#nJoin(inst3,inst4,1,1)
#print inst3.eJoin(inst4,1,1)
"""
inst3.printStats()
print inst3.getFirst()
print inst3.getNext(1)
print inst3.getNext(1)
print inst3.getNext(1)
print inst3.getNext(1)
print inst3.getNext(1)"""
#print inst
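# Minimal usage sketch (illustrative parameters only, mirroring the commented
# examples above): build two small two-column tables, pull every result of a
# nested-loop join on column 1, and report the simulated access cost. Passing
# fing=True to JoinReq switches to the fingered variant instead.
if __name__ == "__main__":
    r = Xf("r")
    r.setStats(10, 2, (1, 1), [-1, 0], [False, False], 0, 40)
    r.FormData()
    s = Xf("s")
    s.setStats(20, 2, (1, 1), [-1, 0], [False, False], 0, 40)
    s.FormData()
    req = JoinReq(r, s, 1, 1, False)  # join r.col1 with s.col1, no fingers
    matches = 0
    result = req.pull()
    while result != "eoo":
        matches += 1
        result = req.pull()
    print "matches: " + str(matches) + ", simulated cost: " + str(req.getCost())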
| vishnuprathish/constrained-data-generator | fingered_temp.py | Python | apache-2.0 | 10,459 |
"""
Find the closest DMC colors for a hex color.
Usage: python closest_colors.py <hexcolor>
"""
import sys
from .. import color
from .. import dmc_colors
def main():
if len(sys.argv) < 2:
sys.exit(__doc__)
hex_color = sys.argv[1]
rgb_color = color.RGBColorFromHexString(hex_color)
print 'Given RGB color', rgb_color
print
print 'Closest DMC colors by distance:'
for pair in dmc_colors.GetClosestDMCColorsPairs(rgb_color):
print 'Distance:', pair[1], dmc_colors.GetStringForDMCColor(pair[0])
if __name__ == '__main__':
main()
| nanaze/pystitch | pystitch/examples/closest_colors.py | Python | apache-2.0 | 562 |
"""
"""
from .register import get_registered_layers
#custom layer import begins
import axpy
import flatten
import argmax
import reshape
import roipooling
import priorbox
import permute
import detection_out
import normalize
import select
import crop
import reduction
#custom layer import ends
custom_layers = get_registered_layers()
def set_args(f, params, node=None):
""" set args for function 'f' using the parameters in node.layer.parameters
Args:
f (function): a python function object
params (object): a object contains attributes needed by f's arguments
Returns:
arg_names (list): a list of argument names
kwargs (dict): a dict contains needed arguments
"""
from ..protobuf_to_dict import protobuf_to_dict
argc = f.__code__.co_argcount
arg_list = f.__code__.co_varnames[0:argc]
kwargs = {}
for arg_name in arg_list:
if arg_name in params:
kwargs[arg_name] = params[arg_name]
if node is not None and len(node.metadata):
kwargs.update(node.metadata)
return arg_list, kwargs
def has_layer(kind):
""" test whether this layer exists in custom layer
"""
return kind in custom_layers
def compute_output_shape(kind, node):
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
shape_func = custom_layers[kind]['shape']
parents = node.parents
inputs = [list(p.output_shape) for p in parents]
arg_names, kwargs = set_args(shape_func, node.params)
if len(inputs) == 1:
inputs = inputs[0]
return shape_func(inputs, **kwargs)
def make_node(template, kind, node):
""" make a PaddleNode for custom layer which means construct
a piece of code to define a layer implemented in 'custom_layers'
Args:
@template (PaddleNode): a factory to new a instance of PaddleNode
@kind (str): type of custom layer
@node (graph.Node): a layer in the net
Returns:
instance of PaddleNode
"""
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
#construct arguments needed by custom layer function from node's parameters
arg_names, kwargs = set_args(layer_func, node.params, node)
return template('custom_layer', kind, **kwargs)
def make_custom_layer(kind, inputs, name, *args, **kwargs):
""" execute a custom layer which is implemented by users
Args:
@kind (str): type name of this layer
@inputs (vars): variable list created by fluid
@namme (str): name for this layer
@args (tuple): other positional arguments
@kwargs (dict): other kv arguments
Returns:
output (var): output variable for this layer
"""
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
return layer_func(inputs, name, *args, **kwargs)
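# Rough usage sketch from the converter's point of view (the 'flatten' kind and
# its 'axis' keyword are illustrative only -- real names and parameters come
# from the layer implementations registered above):
#
#     if has_layer(kind):
#         output = make_custom_layer(kind, [input_var], name, axis=1)
#
# set_args() ensures that only the Caffe parameters the registered function
# actually declares are forwarded to it.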
| lcy-seso/models | fluid/image_classification/caffe2fluid/kaffe/custom_layers/__init__.py | Python | apache-2.0 | 2,996 |
#!/usr/bin/env python2.7
from nltk.book import *
fdist1 = FreqDist(text1)
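# plot the cumulative counts of the 50 most frequent tokens in text1 (Moby Dick)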
fdist1.plot(50, cumulative=True) | prinsmike/nltk_book | ch01/13-cumulative_frequency_plot.py | Python | apache-2.0 | 109 |
# -*- coding: utf-8 -*-
def command():
return "edit-instance-vmware"
def init_argument(parser):
parser.add_argument("--instance-no", required=True)
parser.add_argument("--instance-type", required=True)
parser.add_argument("--key-name", required=True)
parser.add_argument("--compute-resource", required=True)
parser.add_argument("--is-static-ip", required=True)
parser.add_argument("--ip-address", required=False)
parser.add_argument("--subnet-mask", required=False)
parser.add_argument("--default-gateway", required=False)
parser.add_argument("--comment", required=False)
parser.add_argument("--root-size", required=False)
def execute(requester, args):
instance_no = args.instance_no
instance_type = args.instance_type
key_name = args.key_name
compute_resource = args.compute_resource
is_static_ip = args.is_static_ip
ip_address = args.ip_address
subnet_mask = args.subnet_mask
default_gateway = args.default_gateway
comment = args.comment
root_size = args.root_size
parameters = {}
parameters["InstanceNo"] = instance_no
parameters["InstanceType"] = instance_type
parameters["KeyName"] = key_name
parameters["ComputeResource"] = compute_resource
parameters["IsStaticIp"] = is_static_ip
    if ip_address is not None:
        parameters["IpAddress"] = ip_address
    if subnet_mask is not None:
        parameters["SubnetMask"] = subnet_mask
    if default_gateway is not None:
        parameters["DefaultGateway"] = default_gateway
    if comment is not None:
        parameters["Comment"] = comment
    if root_size is not None:
        parameters["RootSize"] = root_size
return requester.execute("/EditInstanceVmware", parameters)
| primecloud-controller-org/pcc-cli | src/pcc/api/instance/edit_instance_vmware.py | Python | apache-2.0 | 1,746 |
import os
import time
import ujson
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from typing import Any, Callable, Dict, List, Mapping, Tuple
from zerver.lib.test_helpers import simulated_queue_client
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_client, UserActivity
from zerver.worker import queue_processors
class WorkerTest(ZulipTestCase):
class FakeClient(object):
def __init__(self):
# type: () -> None
self.consumers = {} # type: Dict[str, Callable]
self.queue = [] # type: List[Tuple[str, Dict[str, Any]]]
def register_json_consumer(self, queue_name, callback):
# type: (str, Callable) -> None
self.consumers[queue_name] = callback
def start_consuming(self):
# type: () -> None
for queue_name, data in self.queue:
callback = self.consumers[queue_name]
callback(data)
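    # FakeClient stands in for the real queue client: register_json_consumer
    # records each consumer callback and start_consuming synchronously replays
    # every queued (queue_name, event) pair into the matching consumer.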
def test_mirror_worker(self):
# type: () -> None
fake_client = self.FakeClient()
data = [
dict(
message=u'\xf3test',
time=time.time(),
rcpt_to=self.example_email('hamlet'),
),
dict(
message='\xf3test',
time=time.time(),
rcpt_to=self.example_email('hamlet'),
),
dict(
message='test',
time=time.time(),
rcpt_to=self.example_email('hamlet'),
),
]
for element in data:
fake_client.queue.append(('email_mirror', element))
with patch('zerver.worker.queue_processors.mirror_email'):
with simulated_queue_client(lambda: fake_client):
worker = queue_processors.MirrorWorker()
worker.setup()
worker.start()
def test_UserActivityWorker(self):
# type: () -> None
fake_client = self.FakeClient()
user = self.example_user('hamlet')
UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
).delete()
data = dict(
user_profile_id = user.id,
client = 'ios',
time = time.time(),
query = 'send_message'
)
fake_client.queue.append(('user_activity', data))
with simulated_queue_client(lambda: fake_client):
worker = queue_processors.UserActivityWorker()
worker.setup()
worker.start()
activity_records = UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
)
self.assertTrue(len(activity_records), 1)
self.assertTrue(activity_records[0].count, 1)
def test_error_handling(self):
# type: () -> None
processed = []
@queue_processors.assign_queue('unreliable_worker')
class UnreliableWorker(queue_processors.QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
if data["type"] == 'unexpected behaviour':
raise Exception('Worker task not performing as expected!')
processed.append(data["type"])
def _log_problem(self):
# type: () -> None
# keep the tests quiet
pass
fake_client = self.FakeClient()
for msg in ['good', 'fine', 'unexpected behaviour', 'back to normal']:
fake_client.queue.append(('unreliable_worker', {'type': msg}))
fn = os.path.join(settings.QUEUE_ERROR_DIR, 'unreliable_worker.errors')
try:
os.remove(fn)
except OSError: # nocoverage # error handling for the directory not existing
pass
with simulated_queue_client(lambda: fake_client):
worker = UnreliableWorker()
worker.setup()
worker.start()
self.assertEqual(processed, ['good', 'fine', 'back to normal'])
line = open(fn).readline().strip()
event = ujson.loads(line.split('\t')[1])
self.assertEqual(event["type"], 'unexpected behaviour')
def test_worker_noname(self):
# type: () -> None
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(TestWorker, self).__init__()
def consume(self, data):
# type: (Mapping[str, Any]) -> None
pass # nocoverage # this is intentionally not called
with self.assertRaises(queue_processors.WorkerDeclarationException):
TestWorker()
def test_worker_noconsume(self):
# type: () -> None
@queue_processors.assign_queue('test_worker')
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(TestWorker, self).__init__()
with self.assertRaises(queue_processors.WorkerDeclarationException):
worker = TestWorker()
worker.consume({})
| amanharitsh123/zulip | zerver/tests/test_queue_worker.py | Python | apache-2.0 | 5,307 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import json
import re
from six.moves import urllib
import httmock
import requests
import six
from girder.constants import SettingKey
from tests import base
def setUpModule():
base.enabledPlugins.append('oauth')
base.startServer()
def tearDownModule():
base.stopServer()
class OauthTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
# girder.plugins is not available until setUp is running
global PluginSettings
from girder.plugins.oauth.constants import PluginSettings
self.adminUser = self.model('user').createUser(
email='[email protected]',
login='admin',
firstName='first',
lastName='last',
password='password',
admin=True
)
# Specifies which test account (typically "new" or "existing") a
# redirect to a provider will simulate authentication for
self.accountType = None
def testDeriveLogin(self):
"""
Unit tests the _deriveLogin method of the provider classes.
"""
from girder.plugins.oauth.providers.base import ProviderBase
login = ProviderBase._deriveLogin('[email protected]', 'John', 'Doe')
self.assertEqual(login, 'johndoe')
login = ProviderBase._deriveLogin('[email protected]', 'A', 'B')
self.assertEqual(login, 'helloworldfoo')
login = ProviderBase._deriveLogin('[email protected]', 'A', 'B', 'user2')
self.assertEqual(login, 'user2')
login = ProviderBase._deriveLogin('[email protected]', 'A', 'B', 'admin')
self.assertEqual(login, 'admin1')
def _testOauth(self, providerInfo):
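        # Generic OAuth-flow exercise driven by a providerInfo dict that
        # describes the provider's id/name, its client id/secret setting keys,
        # the expected authorization-URL pattern, and the simulated
        # 'new'/'existing' accounts.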
# Close registration to start off, and simulate a new user
self.model('setting').set(SettingKey.REGISTRATION_POLICY, 'closed')
self.accountType = 'new'
# We should get an empty listing when no providers are set up
params = {
'key': PluginSettings.PROVIDERS_ENABLED,
'value': []
}
resp = self.request(
'/system/setting', user=self.adminUser, method='PUT', params=params)
self.assertStatusOk(resp)
resp = self.request('/oauth/provider', exception=True, params={
'redirect': 'http://localhost/#foo/bar',
'list': True
})
self.assertStatusOk(resp)
self.assertFalse(resp.json)
# Turn on provider, but don't set other settings
params = {
'list': json.dumps([{
'key': PluginSettings.PROVIDERS_ENABLED,
'value': [providerInfo['id']]
}])
}
resp = self.request(
'/system/setting', user=self.adminUser, method='PUT', params=params)
self.assertStatusOk(resp)
resp = self.request('/oauth/provider', exception=True, params={
'redirect': 'http://localhost/#foo/bar'})
self.assertStatus(resp, 500)
# Set up provider normally
params = {
'list': json.dumps([
{
'key': PluginSettings.PROVIDERS_ENABLED,
'value': [providerInfo['id']]
}, {
'key': providerInfo['client_id']['key'],
'value': providerInfo['client_id']['value']
}, {
'key': providerInfo['client_secret']['key'],
'value': providerInfo['client_secret']['value']
}
])
}
resp = self.request(
'/system/setting', user=self.adminUser, method='PUT',
params=params)
self.assertStatusOk(resp)
# No need to re-fetch and test all of these settings values; they will
# be implicitly tested later
# Make sure that if no list param is passed, we receive the old format
resp = self.request('/oauth/provider', params={
'redirect': 'http://localhost/#foo/bar'
})
self.assertStatusOk(resp)
self.assertIsInstance(resp.json, dict)
self.assertEqual(len(resp.json), 1)
self.assertIn(providerInfo['name'], resp.json)
self.assertRegexpMatches(
resp.json[providerInfo['name']],
providerInfo['url_re'])
# This will need to be called several times, to get fresh tokens
def getProviderResp():
resp = self.request('/oauth/provider', params={
'redirect': 'http://localhost/#foo/bar',
'list': True
})
self.assertStatusOk(resp)
self.assertIsInstance(resp.json, list)
self.assertEqual(len(resp.json), 1)
providerResp = resp.json[0]
self.assertSetEqual(
set(six.viewkeys(providerResp)),
{'id', 'name', 'url'})
self.assertEqual(providerResp['id'], providerInfo['id'])
self.assertEqual(providerResp['name'], providerInfo['name'])
self.assertRegexpMatches(
providerResp['url'],
providerInfo['url_re'])
redirectParams = urllib.parse.parse_qs(
urllib.parse.urlparse(providerResp['url']).query)
csrfTokenParts = redirectParams['state'][0].partition('.')
token = self.model('token').load(
csrfTokenParts[0], force=True, objectId=False)
self.assertLess(
token['expires'],
datetime.datetime.utcnow() + datetime.timedelta(days=0.30))
self.assertEqual(
csrfTokenParts[2],
'http://localhost/#foo/bar')
return providerResp
# Try the new format listing
getProviderResp()
# Try callback, for a non-existant provider
resp = self.request('/oauth/foobar/callback')
self.assertStatus(resp, 400)
# Try callback, without providing any params
resp = self.request('/oauth/%s/callback' % providerInfo['id'])
self.assertStatus(resp, 400)
# Try callback, providing params as though the provider failed
resp = self.request(
'/oauth/%s/callback' % providerInfo['id'],
params={
'code': None,
'error': 'some_custom_error',
}, exception=True)
self.assertStatus(resp, 502)
self.assertEqual(
resp.json['message'],
"Provider returned error: 'some_custom_error'.")
# This will need to be called several times, to use fresh tokens
def getCallbackParams(providerResp):
resp = requests.get(providerResp['url'], allow_redirects=False)
self.assertEqual(resp.status_code, 302)
callbackLoc = urllib.parse.urlparse(resp.headers['location'])
self.assertEqual(
callbackLoc.path,
r'/api/v1/oauth/%s/callback' % providerInfo['id'])
callbackLocQuery = urllib.parse.parse_qs(callbackLoc.query)
self.assertNotHasKeys(callbackLocQuery, ('error',))
callbackParams = {
key: val[0] for key, val in six.viewitems(callbackLocQuery)
}
return callbackParams
# Call (simulated) external provider
getCallbackParams(getProviderResp())
# Try callback, with incorrect CSRF token
params = getCallbackParams(getProviderResp())
params['state'] = 'something_wrong'
resp = self.request('/oauth/%s/callback' % providerInfo['id'],
params=params)
self.assertStatus(resp, 403)
self.assertTrue(
resp.json['message'].startswith('Invalid CSRF token'))
# Try callback, with expired CSRF token
params = getCallbackParams(getProviderResp())
token = self.model('token').load(
params['state'].partition('.')[0], force=True, objectId=False)
token['expires'] -= datetime.timedelta(days=1)
self.model('token').save(token)
resp = self.request('/oauth/%s/callback' % providerInfo['id'],
params=params)
self.assertStatus(resp, 403)
self.assertTrue(
resp.json['message'].startswith('Expired CSRF token'))
# Try callback, with a valid CSRF token but no redirect
params = getCallbackParams(getProviderResp())
params['state'] = params['state'].partition('.')[0]
resp = self.request('/oauth/%s/callback' % providerInfo['id'],
params=params)
self.assertStatus(resp, 400)
self.assertTrue(
resp.json['message'].startswith('No redirect location'))
# Try callback, with incorrect code
params = getCallbackParams(getProviderResp())
params['code'] = 'something_wrong'
resp = self.request('/oauth/%s/callback' % providerInfo['id'],
params=params)
self.assertStatus(resp, 502)
# Try callback, with real parameters from provider, but still for the
# 'new' account
params = getCallbackParams(getProviderResp())
resp = self.request('/oauth/%s/callback' % providerInfo['id'],
params=params)
self.assertStatus(resp, 400)
self.assertTrue(
resp.json['message'].startswith(
'Registration on this instance is closed.'))
# This will need to be called several times, and will do a normal login
def doOauthLogin(accountType):
self.accountType = accountType
params = getCallbackParams(getProviderResp())
resp = self.request('/oauth/%s/callback' % providerInfo['id'],
params=params, isJson=False)
self.assertStatus(resp, 303)
self.assertEqual(resp.headers['Location'],
'http://localhost/#foo/bar')
self.assertTrue('girderToken' in resp.cookie)
resp = self.request('/user/me',
token=resp.cookie['girderToken'].value)
self.assertStatusOk(resp)
self.assertEqual(resp.json['email'],
providerInfo['accounts'][accountType]['user']['email'])
self.assertEqual(resp.json['login'],
providerInfo['accounts'][accountType]['user']['login'])
self.assertEqual(resp.json['firstName'],
providerInfo['accounts'][accountType]['user']['firstName'])
self.assertEqual(resp.json['lastName'],
providerInfo['accounts'][accountType]['user']['lastName'])
# Try callback for the 'existing' account, which should succeed
doOauthLogin('existing')
# Try callback for the 'new' account, with open registration
self.model('setting').set(SettingKey.REGISTRATION_POLICY, 'open')
doOauthLogin('new')
# Password login for 'new' OAuth-only user should fail gracefully
newUser = providerInfo['accounts']['new']['user']
resp = self.request('/user/authentication',
basicAuth='%s:mypasswd' % newUser['login'])
self.assertStatus(resp, 400)
self.assertTrue(
resp.json['message'].startswith('You don\'t have a password.'))
# Reset password for 'new' OAuth-only user should work
self.assertTrue(base.mockSmtp.isMailQueueEmpty())
resp = self.request('/user/password/temporary',
method='PUT', params={
'email': providerInfo['accounts']['new']['user']['email']})
self.assertStatusOk(resp)
self.assertEqual(resp.json['message'], 'Sent temporary access email.')
self.assertTrue(base.mockSmtp.waitForMail())
msg = base.mockSmtp.getMail()
# Pull out the auto-generated token from the email
search = re.search('<a href="(.*)">', msg)
link = search.group(1)
linkParts = link.split('/')
userId = linkParts[-3]
tokenId = linkParts[-1]
tempToken = self.model('token').load(
tokenId, force=True, objectId=False)
resp = self.request('/user/password/temporary/' + userId,
method='GET', params={
'token': tokenId})
self.assertStatusOk(resp)
self.assertEqual(resp.json['user']['login'], newUser['login'])
# We should now be able to change the password
resp = self.request('/user/password',
method='PUT', user=resp.json['user'], params={
'old': tokenId,
'new': 'mypasswd'})
self.assertStatusOk(resp)
# The temp token should get deleted on password change
token = self.model('token').load(tempToken, force=True, objectId=False)
self.assertEqual(token, None)
# Password login for 'new' OAuth-only user should now succeed
resp = self.request('/user/authentication',
basicAuth='%s:mypasswd' % newUser['login'])
self.assertStatusOk(resp)
@httmock.all_requests
def mockOtherRequest(self, url, request):
raise Exception('Unexpected url %s' % str(request.url))
def testGoogleOauth(self):
providerInfo = {
'id': 'google',
'name': 'Google',
'client_id': {
'key': PluginSettings.GOOGLE_CLIENT_ID,
'value': 'google_test_client_id'
},
'client_secret': {
'key': PluginSettings.GOOGLE_CLIENT_SECRET,
'value': 'google_test_client_secret'
},
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/google/callback$',
'url_re': r'^https://accounts\.google\.com/o/oauth2/auth',
'accounts': {
'existing': {
'auth_code': 'google_existing_auth_code',
'access_token': 'google_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'google',
'id': '5326'
}
}
},
'new': {
'auth_code': 'google_new_auth_code',
'access_token': 'google_new_test_token',
'user': {
# this login will be created internally by _deriveLogin
'login': 'googleuser',
'email': '[email protected]',
'firstName': 'John',
'lastName': 'Doe',
'oauth': {
'provider': 'google',
'id': '9876'
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^accounts.google.com$',
path='^/o/oauth2/auth$', method='GET')
def mockGoogleRedirect(url, request):
try:
params = urllib.parse.parse_qs(url.query)
self.assertEqual(
params['response_type'],
['code'])
self.assertEqual(
params['access_type'],
['online'])
self.assertEqual(
params['scope'],
['profile email'])
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertEqual(
params['client_id'],
[providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertRegexpMatches(
params['redirect_uri'][0],
providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (params['redirect_uri'][0],
returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^accounts.google.com$',
path='^/o/oauth2/token$', method='POST')
def mockGoogleToken(url, request):
try:
params = urllib.parse.parse_qs(request.body)
self.assertEqual(
params['client_id'],
[providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertEqual(
params['grant_type'],
['authorization_code'])
self.assertEqual(
params['client_secret'],
[providerInfo['client_secret']['value']])
self.assertRegexpMatches(
params['redirect_uri'][0],
providerInfo['allowed_callback_re'])
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
return json.dumps({
'token_type': 'Bearer',
'access_token': account['access_token'],
'expires_in': 3546,
'id_token': 'google_id_token'
})
@httmock.urlmatch(scheme='https', netloc='^www.googleapis.com$',
path='^/plus/v1/people/me$', method='GET')
def mockGoogleApi(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == \
request.headers['Authorization']:
break
else:
self.fail()
params = urllib.parse.parse_qs(url.query)
self.assertSetEqual(
set(params['fields'][0].split(',')),
{'id', 'emails', 'name'})
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'name': {
'givenName': account['user']['firstName'],
'familyName': account['user']['lastName']
},
'emails': [
{
'type': 'other',
'value': '[email protected]'
}, {
'type': 'account',
'value': account['user']['email']
}
]
})
with httmock.HTTMock(
mockGoogleRedirect,
mockGoogleToken,
mockGoogleApi,
# Must keep "mockOtherRequest" last
self.mockOtherRequest
):
self._testOauth(providerInfo)
def testGithubOauth(self):
providerInfo = {
'id': 'github',
'name': 'GitHub',
'client_id': {
'key': PluginSettings.GITHUB_CLIENT_ID,
'value': 'github_test_client_id'
},
'client_secret': {
'key': PluginSettings.GITHUB_CLIENT_SECRET,
'value': 'github_test_client_secret'
},
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/github/callback$',
'url_re': r'^https://github\.com/login/oauth/authorize',
'accounts': {
'existing': {
'auth_code': 'github_existing_auth_code',
'access_token': 'github_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'github',
'id': '2399'
}
}
},
'new': {
'auth_code': 'github_new_auth_code',
'access_token': 'github_new_test_token',
'user': {
# login may be provided externally by GitHub; for
# simplicity here, do not use a username with whitespace
# or underscores
'login': 'jane83',
'email': '[email protected]',
'firstName': 'Jane',
'lastName': 'Doe',
'oauth': {
'provider': 'github',
'id': 1234
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^github.com$',
path='^/login/oauth/authorize$', method='GET')
def mockGithubRedirect(url, request):
redirectUri = None
try:
params = urllib.parse.parse_qs(url.query)
# Check redirect_uri first, so other errors can still redirect
redirectUri = params['redirect_uri'][0]
self.assertEqual(
params['client_id'],
[providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertRegexpMatches(
redirectUri,
providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
self.assertEqual(
params['scope'],
['user:email'])
except (KeyError, AssertionError) as e:
returnQuery = urllib.parse.urlencode({
'error': repr(e),
})
else:
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (redirectUri, returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^github.com$',
path='^/login/oauth/access_token$', method='POST')
def mockGithubToken(url, request):
try:
self.assertEqual(request.headers['Accept'], 'application/json')
params = urllib.parse.parse_qs(request.body)
self.assertEqual(
params['client_id'],
[providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
self.assertEqual(
params['client_secret'],
[providerInfo['client_secret']['value']])
self.assertRegexpMatches(
params['redirect_uri'][0],
providerInfo['allowed_callback_re'])
except (KeyError, AssertionError) as e:
returnBody = json.dumps({
'error': repr(e),
'error_description': repr(e)
})
else:
returnBody = json.dumps({
'token_type': 'bearer',
'access_token': account['access_token'],
'scope': 'user:email'
})
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json'
},
'content': returnBody
}
@httmock.urlmatch(scheme='https', netloc='^api.github.com$',
path='^/user$', method='GET')
def mockGithubApiUser(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'token %s' % account['access_token'] == \
request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'login': account['user']['login'],
'name': '%s %s' % (account['user']['firstName'],
account['user']['lastName'])
})
@httmock.urlmatch(scheme='https', netloc='^api.github.com$',
path='^/user/emails$', method='GET')
def mockGithubApiEmail(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'token %s' % account['access_token'] == \
request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps([
{
'primary': False,
'email': '[email protected]',
'verified': True
}, {
'primary': True,
'email': account['user']['email'],
'verified': True
}
])
with httmock.HTTMock(
mockGithubRedirect,
mockGithubToken,
mockGithubApiUser,
mockGithubApiEmail,
# Must keep "mockOtherRequest" last
self.mockOtherRequest
):
self._testOauth(providerInfo)
| opadron/girder | plugins/oauth/plugin_tests/oauth_test.py | Python | apache-2.0 | 29,618 |
# #!/usr/bin/env python
#
# import nlopt # THIS IS NOT A PACKAGE!
# import numpy as np
#
# print(('nlopt version='+nlopt.__version__))
#
# def f(x, grad):
# F=x[0]
# L=x[1]
# E=x[2]
# I=x[3]
# D=F*L**3/(3.*E*I)
# return D
#
# n = 4
# opt = nlopt.opt(nlopt.LN_COBYLA, n)
# opt.set_min_objective(f)
# lb = np.array([40., 50., 30e3, 1.])
# ub = np.array([60., 60., 40e3, 10.])
# x = (lb+ub)/2.
# opt.set_lower_bounds(lb)
# opt.set_upper_bounds(ub)
# opt.set_xtol_rel(1e-3)
# opt.set_ftol_rel(1e-3)
# xopt = opt.optimize(x)
#
# opt_val = opt.last_optimum_value()
# result = opt.last_optimize_result()
# print(('opt_result='+str(result)))
# print(('optimizer='+str(xopt)))
# print(('opt_val='+str(opt_val)))
| PMBio/limix | External/nlopt/test/test_std.py | Python | apache-2.0 | 727 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.vm_util import POLL_INTERVAL
FLAGS = flags.FLAGS
flags.DEFINE_string('openstack_auth_url',
os.environ.get('OS_AUTH_URL', 'http://localhost:5000'),
('Url for Keystone authentication service, defaults to '
'$OS_AUTH_URL. Required for discovery of other OpenStack '
'service URLs.'))
flags.DEFINE_string('openstack_username',
os.getenv('OS_USERNAME', 'admin'),
'OpenStack login username, defaults to $OS_USERNAME.')
flags.DEFINE_string('openstack_tenant',
os.getenv('OS_TENANT_NAME', 'admin'),
'OpenStack tenant name, defaults to $OS_TENANT_NAME.')
flags.DEFINE_string('openstack_password_file',
os.getenv('OPENSTACK_PASSWORD_FILE',
'~/.config/openstack-password.txt'),
'Path to file containing the openstack password, '
'defaults to $OPENSTACK_PASSWORD_FILE. Alternatively, '
'setting the password itself in $OS_PASSWORD is also '
'supported.')
flags.DEFINE_string('openstack_nova_endpoint_type',
os.getenv('NOVA_ENDPOINT_TYPE', 'publicURL'),
'OpenStack Nova endpoint type, '
'defaults to $NOVA_ENDPOINT_TYPE.')
class KeystoneAuth(object):
"""
Usage example:
auth = KeystoneAuth(auth_url, auth_tenant, auth_user, auth_password)
token = auth.get_token()
tenant_id = auth.get_tenant_id()
token and tenant_id are required to use all OpenStack python clients
"""
def __init__(self, url, tenant, user, password):
self.__url = url
self.__tenant = tenant
self.__user = user
self.__password = password
self.__connection = None
self.__session = None
def GetConnection(self):
if self.__connection is None:
self.__authenticate()
return self.__connection
def __authenticate(self):
import keystoneclient.v2_0.client as ksclient
self.__connection = ksclient.Client(
auth_url=self.__url,
username=self.__user,
password=self.__password,
tenant=self.__tenant)
self.__connection.authenticate()
def get_token(self):
return self.GetConnection().get_token(self.__session)
def get_tenant_id(self):
raw_token = self.GetConnection().get_raw_token_from_identity_service(
auth_url=self.__url,
username=self.__user,
password=self.__password,
tenant_name=self.__tenant
)
return raw_token['token']['tenant']['id']
class NovaClient(object):
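    # Attribute lookups that NovaClient does not define itself fall through to
    # the wrapped novaclient instance via __getattribute__ below, so objects of
    # this class behave as a thin proxy around the underlying client.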
def __getattribute__(self, item):
try:
return super(NovaClient, self).__getattribute__(item)
except AttributeError:
return self.__client.__getattribute__(item)
def GetPassword(self):
# For compatibility with Nova CLI, use 'OS'-prefixed environment value
# if present. Also support reading the password from a file.
error_msg = ('No OpenStack password specified. '
'Either set the environment variable OS_PASSWORD to the '
'admin password, or provide the name of a file '
'containing the password using the OPENSTACK_PASSWORD_FILE '
'environment variable or --openstack_password_file flag.')
password = os.getenv('OS_PASSWORD')
if password is not None:
return password
try:
with open(os.path.expanduser(FLAGS.openstack_password_file)) as pwfile:
password = pwfile.readline().rstrip()
return password
except IOError as e:
raise Exception(error_msg + ' ' + str(e))
raise Exception(error_msg)
def __init__(self):
from novaclient import client as noclient
self.url = FLAGS.openstack_auth_url
self.user = FLAGS.openstack_username
self.tenant = FLAGS.openstack_tenant
self.endpoint_type = FLAGS.openstack_nova_endpoint_type
self.password = self.GetPassword()
self.__auth = KeystoneAuth(self.url, self.tenant,
self.user, self.password)
self.__client = noclient.Client('2',
auth_url=self.url,
username=self.user,
auth_token=self.__auth.get_token(),
tenant_id=self.__auth.get_tenant_id(),
endpoint_type=self.endpoint_type,
)
def reconnect(self):
from novaclient import client as noclient
self.__auth = KeystoneAuth(self.url, self.tenant, self.user,
self.password)
self.__client = noclient.Client('2',
auth_url=self.url,
username=self.user,
auth_token=self.__auth.get_token(),
tenant_id=self.__auth.get_tenant_id(),
endpoint_type=self.endpoint_type,
)
class AuthException(Exception):
"""Wrapper for NovaClient auth exceptions."""
pass
def retry_authorization(max_retries=1, poll_interval=POLL_INTERVAL):
def decored(function):
@vm_util.Retry(max_retries=max_retries,
poll_interval=poll_interval,
retryable_exceptions=AuthException,
log_errors=False)
@functools.wraps(function)
def decor(*args, **kwargs):
from novaclient.exceptions import Unauthorized
try:
return function(*args, **kwargs)
except Unauthorized as e:
NovaClient.instance.reconnect()
raise AuthException(str(e))
return decor
return decored
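# A minimal usage sketch (illustrative only; the wrapped function and its call
# into novaclient are hypothetical, not part of this module):
#
#   @retry_authorization(max_retries=2)
#   def list_servers(nova_client):
#       return nova_client.servers.list()
#
# An Unauthorized error raised by novaclient makes the decorator call
# NovaClient.instance.reconnect() (instance is expected to be assigned
# elsewhere) and retry the call with a fresh Keystone token.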
| syed/PerfKitBenchmarker | perfkitbenchmarker/openstack/utils.py | Python | apache-2.0 | 6,884 |
# -*- coding: utf-8 -*-
#imports
from linkedin import linkedin
import easygui
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import requests
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
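# Minimal enum emulation for Python 2 (no stdlib enum module is used here): each
# name is mapped to an ascending integer, so the Mode class below exposes
# Mode.PREVIEW == 0, Mode.EDIT == 1 and Mode.REFRESH == 2.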
Mode = enum('PREVIEW', 'EDIT', 'REFRESH')
mode = 0
paramslist = []
key = ''
i = 0
msg = "Enter Required Information"
title = "Linkedin Extractor"
fieldNames = ["Consumer Key","Consumer Secret",
"User Key","User Secret"]
fieldValues = [] # start with placeholder values; they are filled in below
for i in range(4):
fieldValues.append(i)
for i in range(len(sys.argv)):
if str(sys.argv[i]).lower() == "-mode" and (i + 1) < len(sys.argv):
if str(sys.argv[i + 1]).lower() == "preview":
mode = Mode.PREVIEW
elif str(sys.argv[i + 1]).lower() == "edit":
mode = Mode.EDIT
elif str(sys.argv[i + 1]).lower() == "refresh":
mode = Mode.REFRESH
elif str(sys.argv[i]).lower() == "-size":
size = int(sys.argv[i + 1])
elif str(sys.argv[i]).lower() == "-params":
params = str(sys.argv[i + 1])
paramslist = params.split(';')
i += 1
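# As parseArgs below expects, the -params argument is a ';'-separated list of
# key=value pairs whose values are hex-encoded; for example (values are purely
# illustrative):
#   -params "consumer_key=6162;consumer_secret=6364"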
def setArgs(fieldValues):
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
USER_TOKEN = ''
USER_SECRET = ''
RETURN_URL = '' # Not required for developer authentication
fieldValues[0] = ''
fieldValues[1] = ''
fieldValues[2] = ''
fieldValues[3] = ''
return fieldValues
def parseArgs(fieldValues):
#if paramslist is None: break
for i in range(len(paramslist)):
if paramslist[i].split('=')[0].lower() == 'consumer_key':
try:
fieldValues[0] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[0] = 'ENTER_CONSUMER_KEY'
elif paramslist[i].split('=')[0].lower() == 'consumer_secret':
try:
fieldValues[1] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[1] = 'ENTER_CONSUMER_SECRET'
elif paramslist[i].split('=')[0].lower() == 'user_token':
try:
fieldValues[2] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[2] = 'ENTER_USER_TOKEN'
elif paramslist[i].split('=')[0].lower() == 'user_secret':
try:
fieldValues[3] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[3] = 'ENTER_USER_SECRET'
i += 1
return fieldValues
def getScreenInput(fieldValues):
fieldValues = easygui.multenterbox(msg = msg, title = title, fields = fieldNames, values = fieldValues )
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg += ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "":
break # no problems found
fieldValues = easygui.multenterbox(errmsg, title, fieldNames, fieldValues)
return fieldValues
def printData(fieldValues):
if fieldValues != None:
CONSUMER_KEY = fieldValues[0]
CONSUMER_SECRET = fieldValues[1]
USER_TOKEN = fieldValues[2]
USER_SECRET = fieldValues[3]
RETURN_URL = ''
print "beginDSInfo"
print """fileName;#;true
csv_first_row_has_column_names;true;true;
csv_separator;|;true
csv_number_grouping;,;true
csv_number_decimal;.;true
csv_date_format;d.M.yyyy;true"""
print ''.join(['consumer_key;', str(fieldValues[0]).encode('hex'), ';true'])
print ''.join(['consumer_secret;', str(fieldValues[1]).encode('hex'), ';true'])
print ''.join(['user_token;', str(fieldValues[2]).encode('hex'), ';true'])
print ''.join(['user_secret;', str(fieldValues[3]).encode('hex'), ';true'])
print "endDSInfo"
print "beginData"
print 'First_Name, Last_Name, Location'
#try:
# Instantiate the developer authentication class
auth = linkedin.LinkedInDeveloperAuthentication(CONSUMER_KEY, CONSUMER_SECRET,
USER_TOKEN, USER_SECRET,
RETURN_URL,
permissions=linkedin.PERMISSIONS.enums.values())
# Pass it in to the app...
app = linkedin.LinkedInApplication(auth)
try:
connections = app.get_connections()
except requests.ConnectionError:
            easygui.msgbox('Connection Error, Extension Doesn\'t Support Proxies Yet')
#print connections
for c in connections['values']:
#if c.has_key('location')]
try:
print ''.join([c['firstName'].replace(',', ''), ',']),
except:
print ''.join(['None', ', ']),
try:
print ''.join([c['lastName'].replace(',', ''), ',']),
except:
print ''.join(['None', ', ']),
try:
print ''.join([c['location']['name'].replace(',', '')])
except:
print ''.join(['None'])
print "endData"
else:
print "beginDSInfo"
print "endDSInfo"
print "beginData"
print """Error
User Cancelled"""
print "endData"
if mode == Mode.PREVIEW:
fieldValues = setArgs(fieldValues)
#easygui.textbox(msg = 'preview1', text = sys.argv)
fieldValues = getScreenInput(fieldValues)
#easygui.textbox(msg = 'preview2', text = fieldValues)
printData(fieldValues)
elif mode == Mode.EDIT:
#easygui.textbox(msg = 'edit1', text = sys.argv)
fieldValues = parseArgs(fieldValues)
#easygui.textbox(msg = 'edit2', text = fieldValues)
fieldValues = getScreenInput(fieldValues)
#easygui.textbox(msg = 'edit2', text = fieldValues)
printData(fieldValues)
elif mode == Mode.REFRESH:
fieldValues = parseArgs(fieldValues)
#easygui.textbox(msg = 'refresh1', text = sys.argv)
printData(fieldValues)
| SAP/lumira-extension-da-linkedin | source/LinkedinExtractor.py | Python | apache-2.0 | 6,132 |
"""Support for Lutron Caseta scenes."""
from typing import Any
from homeassistant.components.scene import Scene
from .const import BRIDGE_LEAP, DOMAIN as CASETA_DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Lutron Caseta scene platform.
Adds scenes from the Caseta bridge associated with the config_entry as
scene entities.
"""
entities = []
data = hass.data[CASETA_DOMAIN][config_entry.entry_id]
bridge = data[BRIDGE_LEAP]
scenes = bridge.get_scenes()
for scene in scenes:
entity = LutronCasetaScene(scenes[scene], bridge)
entities.append(entity)
async_add_entities(entities, True)
class LutronCasetaScene(Scene):
"""Representation of a Lutron Caseta scene."""
def __init__(self, scene, bridge):
"""Initialize the Lutron Caseta scene."""
self._scene_name = scene["name"]
self._scene_id = scene["scene_id"]
self._bridge = bridge
@property
def name(self):
"""Return the name of the scene."""
return self._scene_name
async def async_activate(self, **kwargs: Any) -> None:
"""Activate the scene."""
await self._bridge.activate_scene(self._scene_id)
| turbokongen/home-assistant | homeassistant/components/lutron_caseta/scene.py | Python | apache-2.0 | 1,243 |
#!/usr/bin/env python
###
# This script sets up a Spark cluster on Google Compute Engine
# Sigmoidanalytics.com
###
from __future__ import with_statement
import logging
import os
import pipes
import random
import shutil
import subprocess
import sys
import tempfile
import time
import commands
import urllib2
from optparse import OptionParser
from sys import stderr
import shlex
import getpass
import threading
import json
###
# Make sure gcloud is installed and authenticated
# Usage: spark_gce.py <project> <no-slaves> <slave-type> <master-type> <identity-file> <zone> <cluster-name>
# Usage: spark_gce.py <project> <cluster-name> destroy
###
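# Example invocation (all values are illustrative only):
#   python spark_gce.py my-gcp-project 4 n1-standard-2 n1-standard-4 ~/.ssh/google_compute_engine us-central1-a demo-cluster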
identity_file = ""
slave_no = ""
slave_type = ""
master_type = ""
zone = ""
cluster_name = ""
username = ""
project = ""
def read_args():
global identity_file
global slave_no
global slave_type
global master_type
global zone
global cluster_name
global username
global project
if len(sys.argv) == 8:
project = sys.argv[1]
slave_no = int(sys.argv[2])
slave_type = sys.argv[3]
master_type = sys.argv[4]
identity_file = sys.argv[5]
zone = sys.argv[6]
cluster_name = sys.argv[7]
username = getpass.getuser()
elif len(sys.argv) == 4 and sys.argv[3].lower() == "destroy":
print 'Destroying cluster ' + sys.argv[2]
project = sys.argv[1]
cluster_name = sys.argv[2]
try:
command = 'gcloud compute --project ' + project + ' instances list --format json'
output = subprocess.check_output(command, shell=True)
data = json.loads(output)
master_nodes=[]
slave_nodes=[]
for instance in data:
try:
host_name = instance['name']
host_ip = instance['networkInterfaces'][0]['accessConfigs'][0]['natIP']
if host_name == cluster_name + '-master':
command = 'gcloud compute instances delete ' + host_name + ' --project ' + project
command = shlex.split(command)
subprocess.call(command)
elif cluster_name + '-slave' in host_name:
command = 'gcloud compute instances delete ' + host_name + ' --project ' + project
command = shlex.split(command)
subprocess.call(command)
except:
pass
except:
print "Failed to Delete instances"
sys.exit(1)
sys.exit(0)
else:
print '# Usage: spark_gce.py <project> <no-slaves> <slave-type> <master-type> <identity-file> <zone> <cluster-name>'
print '# Usage: spark_gce.py <project> <cluster-name> destroy'
sys.exit(0)
def setup_network():
print '[ Setting up Network & Firewall Entries ]'
try:
command = 'gcloud compute --project=' + project + ' networks create "' + cluster_name + '-network" --range "10.240.0.0/16"'
command = shlex.split(command)
subprocess.call(command)
        #Uncomment the above and comment out the section below if you don't want to open all ports to the public.
command = 'gcloud compute firewall-rules delete internal --project '+ project
command = 'gcloud compute firewall-rules create internal --network ' + cluster_name + '-network --allow tcp udp icmp --project '+ project
command = shlex.split(command)
subprocess.call(command)
except OSError:
print "Failed to setup Network & Firewall. Exiting.."
sys.exit(1)
def launch_master():
print '[ Launching Master ]'
command = 'gcloud compute --project "' + project + '" instances create "' + cluster_name + '-master" --zone "' + zone + '" --machine-type "' + master_type + '" --network "' + cluster_name + '-network" --maintenance-policy "MIGRATE" --scopes "https://www.googleapis.com/auth/devstorage.read_only" --image "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141218" --boot-disk-type "pd-standard" --boot-disk-device-name "' + cluster_name + '-md"'
command = shlex.split(command)
subprocess.call(command)
def launch_slaves():
print '[ Launching Slaves ]'
for s_id in range(1,slave_no+1):
command = 'gcloud compute --project "' + project + '" instances create "' + cluster_name + '-slave' + str(s_id) + '" --zone "' + zone + '" --machine-type "' + slave_type + '" --network "' + cluster_name + '-network" --maintenance-policy "MIGRATE" --scopes "https://www.googleapis.com/auth/devstorage.read_only" --image "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-6-v20141218" --boot-disk-type "pd-standard" --boot-disk-device-name "' + cluster_name + '-s' + str(s_id) + 'd"'
command = shlex.split(command)
subprocess.call(command)
def launch_cluster():
print '[ Creating the Cluster ]'
setup_network()
launch_master()
launch_slaves()
def check_gcloud():
myexec = "gcloud"
print '[ Verifying gcloud ]'
try:
subprocess.call([myexec, 'info'])
except OSError:
print "%s executable not found. \n# Make sure gcloud is installed and authenticated\nPlease follow https://cloud.google.com/compute/docs/gcloud-compute/" % myexec
sys.exit(1)
def get_cluster_ips():
command = 'gcloud compute --project ' + project + ' instances list --format json'
output = subprocess.check_output(command, shell=True)
data = json.loads(output)
master_nodes=[]
slave_nodes=[]
for instance in data:
try:
host_name = instance['name']
host_ip = instance['networkInterfaces'][0]['accessConfigs'][0]['natIP']
if host_name == cluster_name + '-master':
master_nodes.append(host_ip)
elif cluster_name + '-slave' in host_name:
slave_nodes.append(host_ip)
except:
pass
# Return all the instances
return (master_nodes, slave_nodes)
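# The JSON parsed above comes from `gcloud compute instances list --format json`;
# only these fields are consumed, roughly of this (illustrative) shape:
#   [{"name": "<cluster_name>-master",
#     "networkInterfaces": [{"accessConfigs": [{"natIP": "203.0.113.10"}]}]}, ...]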
def enable_sudo(master,command):
'''
ssh_command(master,"echo \"import os\" > setuid.py ")
ssh_command(master,"echo \"import sys\" >> setuid.py")
ssh_command(master,"echo \"import commands\" >> setuid.py")
ssh_command(master,"echo \"command=sys.argv[1]\" >> setuid.py")
ssh_command(master,"echo \"os.setuid(os.geteuid())\" >> setuid.py")
ssh_command(master,"echo \"print commands.getstatusoutput(\"command\")\" >> setuid.py")
'''
os.system("ssh -i " + identity_file + " -t -o 'UserKnownHostsFile=/dev/null' -o 'CheckHostIP=no' -o 'StrictHostKeyChecking no' "+ username + "@" + master + " '" + command + "'")
def ssh_thread(host,command):
enable_sudo(host,command)
def install_java(master_nodes,slave_nodes):
print '[ Installing Java and Development Tools ]'
master = master_nodes[0]
master_thread = threading.Thread(target=ssh_thread, args=(master,"sudo yum install -y java-1.7.0-openjdk;sudo yum install -y java-1.7.0-openjdk-devel;sudo yum groupinstall \'Development Tools\' -y"))
master_thread.start()
#ssh_thread(master,"sudo yum install -y java-1.7.0-openjdk")
for slave in slave_nodes:
slave_thread = threading.Thread(target=ssh_thread, args=(slave,"sudo yum install -y java-1.7.0-openjdk;sudo yum install -y java-1.7.0-openjdk-devel;sudo yum groupinstall \'Development Tools\' -y"))
slave_thread.start()
#ssh_thread(slave,"sudo yum install -y java-1.7.0-openjdk")
slave_thread.join()
master_thread.join()
def ssh_command(host,command):
#print "ssh -i " + identity_file + " -o 'UserKnownHostsFile=/dev/null' -o 'CheckHostIP=no' -o 'StrictHostKeyChecking no' "+ username + "@" + host + " '" + command + "'"
commands.getstatusoutput("ssh -i " + identity_file + " -o 'UserKnownHostsFile=/dev/null' -o 'CheckHostIP=no' -o 'StrictHostKeyChecking no' "+ username + "@" + host + " '" + command + "'" )
def deploy_keys(master_nodes,slave_nodes):
print '[ Generating SSH Keys on Master ]'
key_file = os.path.basename(identity_file)
master = master_nodes[0]
ssh_command(master,"ssh-keygen -q -t rsa -N \"\" -f ~/.ssh/id_rsa")
ssh_command(master,"cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys")
os.system("scp -i " + identity_file + " -oUserKnownHostsFile=/dev/null -oCheckHostIP=no -oStrictHostKeyChecking=no -o 'StrictHostKeyChecking no' "+ identity_file + " " + username + "@" + master + ":")
ssh_command(master,"chmod 600 " + key_file)
ssh_command(master,"tar czf .ssh.tgz .ssh")
ssh_command(master,"ssh-keyscan -H $(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1) >> ~/.ssh/known_hosts")
ssh_command(master,"ssh-keyscan -H $(cat /etc/hosts | grep $(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1) | cut -d\" \" -f2) >> ~/.ssh/known_hosts")
print '[ Transfering SSH keys to slaves ]'
for slave in slave_nodes:
print commands.getstatusoutput("ssh -i " + identity_file + " -oUserKnownHostsFile=/dev/null -oCheckHostIP=no -oStrictHostKeyChecking=no " + username + "@" + master + " 'scp -i " + key_file + " -oStrictHostKeyChecking=no .ssh.tgz " + username +"@" + slave + ":'")
ssh_command(slave,"tar xzf .ssh.tgz")
ssh_command(master,"ssh-keyscan -H " + slave + " >> ~/.ssh/known_hosts")
ssh_command(slave,"ssh-keyscan -H $(cat /etc/hosts | grep $(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1) | cut -d\" \" -f2) >> ~/.ssh/known_hosts")
ssh_command(slave,"ssh-keyscan -H $(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1) >> ~/.ssh/known_hosts")
def attach_drive(master_nodes,slave_nodes):
print '[ Adding new 500GB drive on Master ]'
master = master_nodes[0]
command='gcloud compute --project="' + project + '" disks create "' + cluster_name + '-m-disk" --size 500GB --type "pd-standard" --zone ' + zone
command = shlex.split(command)
subprocess.call(command)
command = 'gcloud compute --project="' + project + '" instances attach-disk ' + cluster_name + '-master --device-name "' + cluster_name + '-m-disk" --disk ' + cluster_name + '-m-disk --zone ' + zone
command = shlex.split(command)
subprocess.call(command)
master_thread = threading.Thread(target=ssh_thread, args=(master,"sudo mkfs.ext3 /dev/disk/by-id/google-"+ cluster_name + "-m-disk " + " -F < /dev/null"))
master_thread.start()
print '[ Adding new 500GB drive on Slaves ]'
i = 1
for slave in slave_nodes:
master = slave
command='gcloud compute --project="' + project + '" disks create "' + cluster_name + '-s' + str(i) + '-disk" --size 500GB --type "pd-standard" --zone ' + zone
command = shlex.split(command)
subprocess.call(command)
command = 'gcloud compute --project="' + project + '" instances attach-disk ' + cluster_name + '-slave' + str(i) + ' --disk ' + cluster_name + '-s' + str(i) + '-disk --device-name "' + cluster_name + '-s' + str(i) + '-disk" --zone ' + zone
command = shlex.split(command)
subprocess.call(command)
slave_thread = threading.Thread(target=ssh_thread, args=(slave,"sudo mkfs.ext3 /dev/disk/by-id/google-" + cluster_name + "-s" + str(i) + "-disk -F < /dev/null"))
slave_thread.start()
i=i+1
slave_thread.join()
master_thread.join()
print '[ Mounting new Volume ]'
enable_sudo(master_nodes[0],"sudo mount /dev/disk/by-id/google-"+ cluster_name + "-m-disk /mnt")
enable_sudo(master_nodes[0],"sudo chown " + username + ":" + username + " /mnt")
i=1
for slave in slave_nodes:
enable_sudo(slave,"sudo mount /dev/disk/by-id/google-"+ cluster_name + "-s" + str(i) +"-disk /mnt")
enable_sudo(slave,"sudo chown " + username + ":" + username + " /mnt")
i=i+1
    print '[ All volumes mounted, will be available at /mnt ]'
def setup_spark(master_nodes,slave_nodes):
print '[ Downloading Binaries ]'
master = master_nodes[0]
ssh_command(master,"rm -fr sigmoid")
ssh_command(master,"mkdir sigmoid")
ssh_command(master,"cd sigmoid;wget https://s3.amazonaws.com/sigmoidanalytics-builds/spark/1.2.0/spark-1.2.0-bin-cdh4.tgz")
ssh_command(master,"cd sigmoid;wget https://s3.amazonaws.com/sigmoidanalytics-builds/spark/0.9.1/gce/scala.tgz")
ssh_command(master,"cd sigmoid;tar zxf spark-1.2.0-bin-cdh4.tgz;rm spark-1.2.0-bin-cdh4.tgz")
ssh_command(master,"cd sigmoid;tar zxf scala.tgz;rm scala.tgz")
print '[ Updating Spark Configurations ]'
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;cp spark-env.sh.template spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo 'export SCALA_HOME=\"/home/`whoami`/sigmoid/scala\"' >> spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo 'export SPARK_MEM=2454m' >> spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo \"SPARK_JAVA_OPTS+=\\\" -Dspark.local.dir=/mnt/spark \\\"\" >> spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo 'export SPARK_JAVA_OPTS' >> spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo 'export SPARK_MASTER_IP=PUT_MASTER_IP_HERE' >> spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo 'export MASTER=spark://PUT_MASTER_IP_HERE:7077' >> spark-env.sh")
ssh_command(master,"cd sigmoid;cd spark-1.2.0-bin-cdh4/conf;echo 'export JAVA_HOME=/usr/lib/jvm/java-1.7.0-openjdk.x86_64' >> spark-env.sh")
for slave in slave_nodes:
ssh_command(master,"echo " + slave + " >> sigmoid/spark-1.2.0-bin-cdh4/conf/slaves")
ssh_command(master,"sed -i \"s/PUT_MASTER_IP_HERE/$(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1)/g\" sigmoid/spark-1.2.0-bin-cdh4/conf/spark-env.sh")
ssh_command(master,"chmod +x sigmoid/spark-1.2.0-bin-cdh4/conf/spark-env.sh")
print '[ Rsyncing Spark to all slaves ]'
#Change permissions
enable_sudo(master,"sudo chown " + username + ":" + username + " /mnt")
i=1
for slave in slave_nodes:
enable_sudo(slave,"sudo chown " + username + ":" + username + " /mnt")
for slave in slave_nodes:
ssh_command(master,"rsync -za /home/" + username + "/sigmoid " + slave + ":")
ssh_command(slave,"mkdir /mnt/spark")
ssh_command(master,"mkdir /mnt/spark")
print '[ Starting Spark Cluster ]'
ssh_command(master,"sigmoid/spark-1.2.0-bin-cdh4/sbin/start-all.sh")
#setup_shark(master_nodes,slave_nodes)
setup_hadoop(master_nodes,slave_nodes)
print "\n\nSpark Master Started, WebUI available at : http://" + master + ":8080"
def setup_hadoop(master_nodes,slave_nodes):
master = master_nodes[0]
print '[ Downloading hadoop ]'
ssh_command(master,"cd sigmoid;wget https://s3.amazonaws.com/sigmoidanalytics-builds/hadoop/hadoop-2.0.0-cdh4.2.0.tar.gz")
ssh_command(master,"cd sigmoid;tar zxf hadoop-2.0.0-cdh4.2.0.tar.gz")
ssh_command(master,"cd sigmoid;rm hadoop-2.0.0-cdh4.2.0.tar.gz")
print '[ Configuring Hadoop ]'
#Configure .bashrc
ssh_command(master,"echo '#HADOOP_CONFS' >> .bashrc")
ssh_command(master,"echo 'export JAVA_HOME=/usr/lib/jvm/java-1.7.0-openjdk.x86_64' >> .bashrc")
ssh_command(master,"echo 'export HADOOP_INSTALL=/home/`whoami`/sigmoid/hadoop-2.0.0-cdh4.2.0' >> .bashrc")
ssh_command(master,"echo 'export PATH=$PATH:\$HADOOP_INSTALL/bin' >> .bashrc")
ssh_command(master,"echo 'export PATH=$PATH:\$HADOOP_INSTALL/sbin' >> .bashrc")
ssh_command(master,"echo 'export HADOOP_MAPRED_HOME=\$HADOOP_INSTALL' >> .bashrc")
ssh_command(master,"echo 'export HADOOP_COMMON_HOME=\$HADOOP_INSTALL' >> .bashrc")
ssh_command(master,"echo 'export HADOOP_HDFS_HOME=\$HADOOP_INSTALL' >> .bashrc")
ssh_command(master,"echo 'export YARN_HOME=\$HADOOP_INSTALL' >> .bashrc")
#Remove *-site.xmls
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0;rm etc/hadoop/core-site.xml")
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0;rm etc/hadoop/yarn-site.xml")
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0;rm etc/hadoop/hdfs-site.xml")
#Download Our Confs
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/;wget https://s3.amazonaws.com/sigmoidanalytics-builds/spark/0.9.1/gce/configs/core-site.xml")
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/;wget https://s3.amazonaws.com/sigmoidanalytics-builds/spark/0.9.1/gce/configs/hdfs-site.xml")
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/;wget https://s3.amazonaws.com/sigmoidanalytics-builds/spark/0.9.1/gce/configs/mapred-site.xml")
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/;wget https://s3.amazonaws.com/sigmoidanalytics-builds/spark/0.9.1/gce/configs/yarn-site.xml")
#Config Core-site
ssh_command(master,"sed -i \"s/PUT-MASTER-IP/$(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1)/g\" sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/core-site.xml")
#Create data/node dirs
ssh_command(master,"mkdir -p /mnt/hadoop/hdfs/namenode;mkdir -p /mnt/hadoop/hdfs/datanode")
#Config slaves
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/;rm slaves")
for slave in slave_nodes:
ssh_command(master,"cd sigmoid/hadoop-2.0.0-cdh4.2.0/etc/hadoop/;echo " + slave + " >> slaves")
print '[ Rsyncing with Slaves ]'
#Rsync everything
for slave in slave_nodes:
ssh_command(master,"rsync -za /home/" + username + "/sigmoid " + slave + ":")
ssh_command(slave,"mkdir -p /mnt/hadoop/hdfs/namenode;mkdir -p /mnt/hadoop/hdfs/datanode")
ssh_command(master,"rsync -za /home/" + username + "/.bashrc " + slave + ":")
    print '[ Formatting namenode ]'
#Format namenode
ssh_command(master,"sigmoid/hadoop-2.0.0-cdh4.2.0/bin/hdfs namenode -format")
print '[ Starting DFS ]'
#Start dfs
ssh_command(master,"sigmoid/hadoop-2.0.0-cdh4.2.0/sbin/start-dfs.sh")
def setup_shark(master_nodes,slave_nodes):
master = master_nodes[0]
print '[ Downloading Shark binaries ]'
ssh_command(master,"cd sigmoid;wget https://s3.amazonaws.com/spark-ui/hive-0.11.0-bin.tgz")
ssh_command(master,"cd sigmoid;wget https://s3.amazonaws.com/spark-ui/shark-0.9-hadoop-2.0.0-mr1-cdh4.2.0.tar.gz")
ssh_command(master,"cd sigmoid;tar zxf hive-0.11.0-bin.tgz")
ssh_command(master,"cd sigmoid;tar zxf shark-0.9-hadoop-2.0.0-mr1-cdh4.2.0.tar.gz")
ssh_command(master,"rm sigmoid/hive-0.11.0-bin.tgz")
ssh_command(master,"rm sigmoid/shark-0.9-hadoop-2.0.0-mr1-cdh4.2.0.tar.gz")
print '[ Configuring Shark ]'
ssh_command(master,"cd sigmoid/shark/;echo \"export SHARK_MASTER_MEM=1g\" > conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"SPARK_JAVA_OPTS+=\\\" -Dspark.kryoserializer.buffer.mb=10 \\\"\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"export SPARK_JAVA_OPTS\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"export HIVE_HOME=/home/`whoami`/sigmoid/hive-0.11.0-bin\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"export SPARK_JAVA_OPTS\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"export MASTER=spark://PUT_MASTER_IP_HERE:7077\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"export SPARK_HOME=/home/`whoami`/sigmoid/spark-0.9.1-bin-cdh4\" >> conf/shark-env.sh")
ssh_command(master,"mkdir /mnt/tachyon")
ssh_command(master,"cd sigmoid/shark/;echo \"export TACHYON_MASTER=PUT_MASTER_IP_HERE:19998\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"export TACHYON_WAREHOUSE_PATH=/mnt/tachyon\" >> conf/shark-env.sh")
ssh_command(master,"cd sigmoid/shark/;echo \"source /home/`whoami`/sigmoid/spark-0.9.1-bin-cdh4/conf/spark-env.sh\" >> conf/shark-env.sh")
ssh_command(master,"sed -i \"s/PUT_MASTER_IP_HERE/$(/sbin/ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1)/g\" sigmoid/shark/conf/shark-env.sh")
ssh_command(master,"chmod +x sigmoid/shark/conf/shark-env.sh")
print '[ Rsyncing Shark on slaves ]'
for slave in slave_nodes:
ssh_command(master,"rsync -za /home/" + username + "/sigmoid " + slave + ":")
print '[ Starting Shark Server ]'
ssh_command(master,"cd sigmoid/shark/;./bin/shark --service sharkserver 10000 > log.txt 2>&1 &")
def show_banner():
os.system("wget -qO- https://s3.amazonaws.com/sigmoidanalytics-builds/spark/0.9.1/gce/configs/banner")
def real_main():
show_banner()
print "[ Script Started ]"
#Read the arguments
read_args()
#Make sure gcloud is accessible.
check_gcloud()
#Launch the cluster
launch_cluster()
    #Wait some time for machines to boot up
print '[ Waiting 120 Seconds for Machines to start up ]'
time.sleep(120)
#Get Master/Slave IP Addresses
(master_nodes, slave_nodes) = get_cluster_ips()
#Install Java and build-essential
install_java(master_nodes,slave_nodes)
#Generate SSH keys and deploy
deploy_keys(master_nodes,slave_nodes)
#Attach a new empty drive and format it
attach_drive(master_nodes,slave_nodes)
#Set up Spark/Shark/Hadoop
setup_spark(master_nodes,slave_nodes)
def main():
try:
real_main()
except Exception as e:
print >> stderr, "\nError:\n", e
if __name__ == "__main__":
main()
| sigmoidanalytics/spark_gce | spark_gce.py | Python | apache-2.0 | 20,428 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification of real numbers."""
__author__ = 'Sean Lip'
from core.tests import test_utils
from extensions.rules import real
class RealRuleUnitTests(test_utils.GenericTestBase):
"""Tests for rules operating on Real objects."""
def test_equals_rule(self):
self.assertTrue(real.Equals(3).eval(3))
self.assertTrue(real.Equals(3.0).eval(3))
self.assertFalse(real.Equals(4).eval(3))
def test_is_less_than_rule(self):
self.assertTrue(real.IsLessThan(4).eval(3))
self.assertTrue(real.IsLessThan(4).eval(3.0))
self.assertTrue(real.IsLessThan(4.0).eval(3.0))
self.assertFalse(real.IsLessThan(3).eval(3))
self.assertFalse(real.IsLessThan(3.0).eval(3.0))
self.assertFalse(real.IsLessThan(3.0).eval(4.0))
self.assertFalse(real.IsLessThan(3).eval(4))
def test_is_greater_than_rule(self):
self.assertTrue(real.IsGreaterThan(3).eval(4))
self.assertTrue(real.IsGreaterThan(3.0).eval(4))
self.assertTrue(real.IsGreaterThan(3.0).eval(4.0))
self.assertFalse(real.IsGreaterThan(3).eval(3))
self.assertFalse(real.IsGreaterThan(3.0).eval(3.0))
self.assertFalse(real.IsGreaterThan(4.0).eval(3.0))
self.assertFalse(real.IsGreaterThan(4).eval(3))
def test_is_less_than_or_equal_to_rule(self):
rule = real.IsLessThanOrEqualTo(3)
self.assertTrue(rule.eval(2))
self.assertTrue(rule.eval(3))
self.assertFalse(rule.eval(4))
def test_is_greater_than_or_equal_to_rule(self):
rule = real.IsGreaterThanOrEqualTo(3)
self.assertTrue(rule.eval(4))
self.assertTrue(rule.eval(3))
self.assertFalse(rule.eval(2))
def test_is_inclusively_between_rule(self):
with self.assertRaises(AssertionError):
real.IsInclusivelyBetween(2, 1)
rule = real.IsInclusivelyBetween(1, 3)
self.assertTrue(rule.eval(2))
self.assertTrue(rule.eval(1))
self.assertTrue(rule.eval(3))
self.assertTrue(rule.eval(1.0))
self.assertFalse(rule.eval(3.001))
def test_is_within_tolerance_rule(self):
rule = real.IsWithinTolerance(0.5, 0)
self.assertTrue(rule.eval(0))
self.assertTrue(rule.eval(0.5))
self.assertFalse(rule.eval(0.51))
| won0089/oppia | extensions/rules/real_test.py | Python | apache-2.0 | 2,944 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import str
from textwrap import dedent
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.base.exceptions import TargetDefinitionException
from pants.base.parse_context import ParseContext
from pants.build_graph.address import Address
from pants.build_graph.app_base import Bundle, DirectoryReMapper
from pants.source.wrapped_globs import Globs
from pants_test.test_base import TestBase
def _bundle(rel_path):
pc = ParseContext(rel_path=rel_path, type_aliases={})
return Bundle(pc)
def _globs(rel_path):
pc = ParseContext(rel_path=rel_path, type_aliases={})
return Globs(pc)
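# These helpers appear to stand in for the `bundle` and `globs` aliases that
# BUILD-file parsing would normally provide, bound to a ParseContext rooted at
# rel_path, so the tests below can construct bundles without real BUILD files.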
class JvmAppTest(TestBase):
def test_simple(self):
binary_target = self.make_target(':foo-binary', JvmBinary, main='com.example.Foo')
app_target = self.make_target(':foo', JvmApp, basename='foo-app', binary=':foo-binary')
self.assertEqual('foo-app', app_target.payload.basename)
self.assertEqual('foo-app', app_target.basename)
self.assertEqual(binary_target, app_target.binary)
self.assertEqual([':foo-binary'], list(app_target.compute_dependency_specs(payload=app_target.payload)))
def test_jvmapp_bundle_payload_fields(self):
app_target = self.make_target(':foo_payload',
JvmApp,
basename='foo-payload-app',
archive='zip')
self.assertEqual('foo-payload-app', app_target.payload.basename)
self.assertIsNone(app_target.payload.deployjar)
self.assertEqual('zip', app_target.payload.archive)
def test_bad_basename(self):
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmApp.* basename must not equal name.'):
self.make_target(':foo', JvmApp, basename='foo')
def create_app(self, rel_path, name=None, **kwargs):
self.create_file(os.path.join(rel_path, 'config/densities.xml'))
return self.make_target(Address(rel_path, name or 'app').spec,
JvmApp,
bundles=[_bundle(rel_path)(fileset='config/densities.xml')],
**kwargs)
def test_binary_via_binary(self):
bin = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
app = self.create_app('src/java/org/archimedes/buoyancy', binary=':bin')
self.assertEqual(app.binary, bin)
def test_binary_via_dependencies(self):
bin = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
app = self.create_app('src/java/org/archimedes/buoyancy', dependencies=[bin])
self.assertEqual(app.binary, bin)
def test_degenerate_binaries(self):
bin = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
app = self.create_app('src/java/org/archimedes/buoyancy', binary=':bin', dependencies=[bin])
self.assertEqual(app.binary, bin)
def test_no_binary(self):
app = self.create_app('src/java/org/archimedes/buoyancy')
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app\).*'
r' An app must define exactly one'):
app.binary
def test_too_many_binaries_mixed(self):
self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
bin2 = self.make_target('src/java/org/archimedes/buoyancy:bin2', JvmBinary)
app = self.create_app('src/java/org/archimedes/buoyancy', binary=':bin', dependencies=[bin2])
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app\).*'
r' An app must define exactly one'):
app.binary
def test_too_many_binaries_via_deps(self):
bin = self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
bin2 = self.make_target('src/java/org/archimedes/buoyancy:bin2', JvmBinary)
app = self.create_app('src/java/org/archimedes/buoyancy', dependencies=[bin, bin2])
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app\).*'
r' An app must define exactly one'):
app.binary
def test_not_a_binary(self):
self.make_target('src/java/org/archimedes/buoyancy:bin', JvmBinary)
self.create_app('src/java/org/archimedes/buoyancy', name='app', binary=':bin')
app = self.create_app('src/java/org/archimedes/buoyancy', name='app2', binary=':app')
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmApp.*src/java/org/archimedes/buoyancy:app2\).*'
r' Expected binary dependency'):
app.binary
class BundleTest(TestBase):
def test_bundle_filemap_dest_bypath(self):
spec_path = 'src/java/org/archimedes/buoyancy'
densities = self.create_file(os.path.join(spec_path, 'config/densities.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(fileset='config/densities.xml')])
self.assertEqual(1, len(app.bundles))
# after one big refactor, ../../../../../ snuck into this path:
self.assertEqual({densities: 'config/densities.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_dest_byglobs(self):
spec_path = 'src/java/org/archimedes/tub'
one = self.create_file(os.path.join(spec_path, 'config/one.xml'))
two = self.create_file(os.path.join(spec_path, 'config/two.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
globs = _globs(spec_path)
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(fileset=globs('config/*.xml'))])
self.assertEqual(1, len(app.bundles))
self.assertEqual({one: 'config/one.xml', two: 'config/two.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_dest_relative(self):
spec_path = 'src/java/org/archimedes/crown'
five = self.create_file(os.path.join(spec_path, 'gold/config/five.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(relative_to='gold',
fileset='gold/config/five.xml')])
self.assertEqual(1, len(app.bundles))
self.assertEqual({five: 'config/five.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_dest_remap(self):
spec_path = 'src/java/org/archimedes/crown'
one = self.create_file(os.path.join(spec_path, 'config/one.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
mapper = DirectoryReMapper(os.path.join(spec_path, 'config'), 'gold/config')
app = self.make_target(spec_path,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(mapper=mapper, fileset='config/one.xml')])
self.assertEqual(1, len(app.bundles))
self.assertEqual({one: 'gold/config/one.xml'}, app.bundles[0].filemap)
def test_bundle_filemap_remap_base_not_exists(self):
# Create directly
with self.assertRaises(DirectoryReMapper.NonexistentBaseError):
DirectoryReMapper("dummy/src/java/org/archimedes/crown/missing", "dummy")
def test_bundle_add(self):
spec_path = 'src/java/org/archimedes/volume'
stone_dense = self.create_file(os.path.join(spec_path, 'config/stone/dense.xml'))
metal_dense = self.create_file(os.path.join(spec_path, 'config/metal/dense.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
bundle = _bundle(spec_path)(relative_to='config',
fileset=['config/stone/dense.xml', 'config/metal/dense.xml'])
app = self.make_target(spec_path, JvmApp, dependencies=[unused], bundles=[bundle])
self.assertEqual(1, len(app.bundles))
self.assertEqual({stone_dense: 'stone/dense.xml', metal_dense: 'metal/dense.xml'},
app.bundles[0].filemap)
def test_multiple_bundles(self):
spec_path = 'src/java/org/archimedes/volume'
stone_dense = self.create_file(os.path.join(spec_path, 'config/stone/dense.xml'))
metal_dense = self.create_file(os.path.join(spec_path, 'config/metal/dense.xml'))
unused = self.make_target(Address(spec_path, 'unused').spec, JvmBinary)
self.add_to_build_file('src/java/org/archimedes/volume/BUILD', dedent("""
jvm_app(name='volume',
dependencies=[':unused'],
bundles=[
bundle(relative_to='config', fileset='config/stone/dense.xml')
]
)
jvm_app(name='bathtub',
dependencies=[':unused'],
bundles=[
bundle(fileset='config/metal/dense.xml')
]
)
"""))
app1 = self.make_target(Address(spec_path, 'app1').spec,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(relative_to='config',
fileset='config/stone/dense.xml')])
app2 = self.make_target(Address(spec_path, 'app2').spec,
JvmApp,
dependencies=[unused],
bundles=[_bundle(spec_path)(fileset='config/metal/dense.xml')])
self.assertEqual(1, len(app1.bundles))
self.assertEqual({stone_dense: 'stone/dense.xml'}, app1.bundles[0].filemap)
self.assertEqual(1, len(app2.bundles))
self.assertEqual({metal_dense: 'config/metal/dense.xml'}, app2.bundles[0].filemap)
def test_globs_relative_to_build_root(self):
spec_path = 'y'
unused = self.make_target(spec_path, JvmBinary)
globs = _globs(spec_path)
app = self.make_target('y:app',
JvmApp,
dependencies=[unused],
bundles=[
_bundle(spec_path)(fileset=globs("z/*")),
_bundle(spec_path)(fileset=['a/b'])
])
self.assertEqual(['y/a/b', 'y/z/*'], sorted(app.globs_relative_to_buildroot()['globs']))
def test_list_of_globs_fails(self):
# It's not allowed according to the docs, and will behave badly.
spec_path = 'y'
globs = _globs(spec_path)
with self.assertRaises(ValueError):
_bundle(spec_path)(fileset=[globs("z/*")])
def test_jvmapp_fingerprinting(self):
spec_path = 'y'
globs = _globs(spec_path)
self.create_file(os.path.join(spec_path, 'one.xml'))
self.create_file(os.path.join(spec_path, 'config/two.xml'))
def calc_fingerprint():
      # Globs are evaluated eagerly, so we need to recreate the target to recalculate the fingerprint.
self.reset_build_graph()
app = self.make_target('y:app',
JvmApp,
dependencies=[],
bundles=[
_bundle(spec_path)(fileset=globs("*"))
])
return app.payload.fingerprint()
fingerprint_before = calc_fingerprint()
os.mkdir(os.path.join(self.build_root, spec_path, 'folder_one'))
self.assertEqual(fingerprint_before, calc_fingerprint())
self.create_file(os.path.join(spec_path, 'three.xml'))
self.assertNotEqual(fingerprint_before, calc_fingerprint())
def test_jvmapp_fingerprinting_with_non_existing_files(self):
spec_path = 'y'
def calc_fingerprint():
self.reset_build_graph()
return self.make_target('y:app',
JvmApp,
dependencies=[],
bundles=[
_bundle(spec_path)(fileset=['one.xml'])
]).payload.fingerprint()
fingerprint_non_existing_file = calc_fingerprint()
self.create_file(os.path.join(spec_path, 'one.xml'))
fingerprint_empty_file = calc_fingerprint()
self.create_file(os.path.join(spec_path, 'one.xml'), contents='some content')
fingerprint_file_with_content = calc_fingerprint()
self.assertNotEqual(fingerprint_empty_file, fingerprint_non_existing_file)
self.assertNotEqual(fingerprint_empty_file, fingerprint_file_with_content)
self.assertNotEqual(fingerprint_file_with_content, fingerprint_empty_file)
def test_rel_path_with_glob_fails(self):
# Globs are treated as eager, so rel_path doesn't affect their meaning.
# The effect of this is likely to be confusing, so disallow it.
spec_path = 'y'
self.create_file(os.path.join(spec_path, 'z', 'somefile'))
globs = _globs(spec_path)
with self.assertRaises(ValueError) as cm:
_bundle(spec_path)(rel_path="config", fileset=globs('z/*'))
self.assertIn("Must not use a glob for 'fileset' with 'rel_path'.", str(cm.exception))
def test_allow_globs_when_rel_root_matches_rel_path(self):
# If a glob has the same rel_root as the rel_path, then
# it will correctly pick up the right files.
# We don't allow BUILD files to have declarations with this state.
# But filesets can be created this way via macros or pants internals.
self.create_file(os.path.join('y', 'z', 'somefile'))
bundle = _bundle('y')(rel_path="y/z", fileset=_globs('y/z')('*'))
self.assertEqual({'globs': [u'y/z/*']}, bundle.fileset.filespec)
def test_rel_path_overrides_context_rel_path_for_explicit_path(self):
spec_path = 'y'
unused = self.make_target(spec_path, JvmBinary)
app = self.make_target('y:app',
JvmApp,
dependencies=[unused],
bundles=[
_bundle(spec_path)(rel_path="config", fileset=['a/b'])
])
self.assertEqual({os.path.join(self.build_root, 'config/a/b'): 'a/b'}, app.bundles[0].filemap)
self.assertEqual(['config/a/b'], sorted(app.globs_relative_to_buildroot()['globs']))
| twitter/pants | tests/python/pants_test/backend/jvm/targets/test_jvm_app.py | Python | apache-2.0 | 14,693 |
import sys
import ray
import pytest
from ray.test_utils import (
generate_system_config_map,
wait_for_condition,
wait_for_pid_to_exit,
)
@ray.remote
class Increase:
def method(self, x):
return x + 2
@ray.remote
def increase(x):
return x + 1
@pytest.mark.parametrize(
"ray_start_regular", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_gcs_server_restart(ray_start_regular):
actor1 = Increase.remote()
result = ray.get(actor1.method.remote(1))
assert result == 3
ray.worker._global_node.kill_gcs_server()
ray.worker._global_node.start_gcs_server()
result = ray.get(actor1.method.remote(7))
assert result == 9
actor2 = Increase.remote()
result = ray.get(actor2.method.remote(2))
assert result == 4
result = ray.get(increase.remote(1))
assert result == 2
@pytest.mark.parametrize(
"ray_start_regular", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_gcs_server_restart_during_actor_creation(ray_start_regular):
ids = []
for i in range(0, 100):
actor = Increase.remote()
ids.append(actor.method.remote(1))
ray.worker._global_node.kill_gcs_server()
ray.worker._global_node.start_gcs_server()
ready, unready = ray.wait(ids, num_returns=100, timeout=240)
print("Ready objects is {}.".format(ready))
print("Unready objects is {}.".format(unready))
assert len(unready) == 0
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_node_failure_detector_when_gcs_server_restart(ray_start_cluster_head):
"""Checks that the node failure detector is correct when gcs server restart.
We set the cluster to timeout nodes after 2 seconds of heartbeats. We then
kill gcs server and remove the worker node and restart gcs server again to
check that the removed node will die finally.
"""
cluster = ray_start_cluster_head
worker = cluster.add_node()
cluster.wait_for_nodes()
# Make sure both head and worker node are alive.
nodes = ray.nodes()
assert len(nodes) == 2
assert nodes[0]["alive"] and nodes[1]["alive"]
to_be_removed_node = None
for node in nodes:
if node["RayletSocketName"] == worker.raylet_socket_name:
to_be_removed_node = node
assert to_be_removed_node is not None
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
# Kill gcs server.
cluster.head_node.kill_gcs_server()
    # Wait to prevent the gcs server process from becoming a zombie.
gcs_server_process.wait()
wait_for_pid_to_exit(gcs_server_pid, 1000)
raylet_process = worker.all_processes["raylet"][0].process
raylet_pid = raylet_process.pid
# Remove worker node.
cluster.remove_node(worker, allow_graceful=False)
    # Wait to prevent the raylet process from becoming a zombie.
raylet_process.wait()
wait_for_pid_to_exit(raylet_pid)
# Restart gcs server process.
cluster.head_node.start_gcs_server()
def condition():
nodes = ray.nodes()
assert len(nodes) == 2
for node in nodes:
if node["NodeID"] == to_be_removed_node["NodeID"]:
return not node["alive"]
return False
    # Wait for the removed node to be marked dead.
wait_for_condition(condition, timeout=10)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| robertnishihara/ray | python/ray/tests/test_gcs_fault_tolerance.py | Python | apache-2.0 | 3,783 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware that will provide Static Large Object (SLO) support.
This feature is very similar to Dynamic Large Object (DLO) support in that
it allows the user to upload many objects concurrently and afterwards
download them as a single object. It is different in that it does not rely
on eventually consistent container listings to do so. Instead, a user
defined manifest of the object segments is used.
----------------------
Uploading the Manifest
----------------------
After the user has uploaded the objects to be concatenated a manifest is
uploaded. The request must be a PUT with the query parameter::
?multipart-manifest=put
The body of this request will be an ordered list of files in
json data format. The data to be supplied for each segment is::
path: the path to the segment (not including account)
/container/object_name
etag: the etag given back when the segment was PUT
size_bytes: the size of the segment in bytes
The format of the list will be::
json:
[{"path": "/cont/object",
"etag": "etagoftheobjectsegment",
"size_bytes": 1048576}, ...]
The number of object segments is limited to a configurable amount, default
1000. Each segment, except for the final one, must be at least 1 megabyte
(configurable). On upload, the middleware will head every segment passed in and
verify the size and etag of each. If any of the objects do not match (not
found, size/etag mismatch, below minimum size) then the user will receive a 4xx
error response. If everything does match, the user will receive a 2xx response
and the SLO object is ready for downloading.
Behind the scenes, on success, a json manifest generated from the user input is
sent to object servers with an extra "X-Static-Large-Object: True" header
and a modified Content-Type. The parameter: swift_bytes=$total_size will be
appended to the existing Content-Type, where total_size is the sum of all
the included segments' size_bytes. This extra parameter will be hidden from
the user.
Manifest files can reference objects in separate containers, which will improve
concurrent upload speed. Objects can be referenced by multiple manifests. The
segments of an SLO manifest can even be other SLO manifests. Treat them as any
other object, i.e. use the Etag and Content-Length returned by the PUT of the
sub-SLO when listing it in the parent SLO's manifest.
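As an illustrative sketch only (it uses the third-party requests library and
placeholder values for the storage URL, auth token, container and object
names; it is not part of this middleware), a manifest upload could look
like::

    import json
    import requests

    manifest = [{"path": "/cont/seg_1",
                 "etag": "etagofsegment1",
                 "size_bytes": 1048576}]
    requests.put(storage_url + "/cont/my-slo?multipart-manifest=put",
                 data=json.dumps(manifest),
                 headers={"X-Auth-Token": token})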
-------------------------
Retrieving a Large Object
-------------------------
A GET request to the manifest object will return the concatenation of the
objects from the manifest much like DLO. If any of the segments from the
manifest are not found or their Etag/Content-Length no longer match, the
connection will drop. In this case a 409 Conflict will be logged in the proxy
logs and the user will receive incomplete results.
The headers from this GET or HEAD request will return the metadata attached
to the manifest object itself with some exceptions::
Content-Length: the total size of the SLO (the sum of the sizes of
the segments in the manifest)
X-Static-Large-Object: True
Etag: the etag of the SLO (generated the same way as DLO)
A GET request with the query parameter::
?multipart-manifest=get
Will return the actual manifest file itself. This is generated json and does
not match the data sent from the original multipart-manifest=put. This call's
main purpose is for debugging.
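For example, with the same illustrative client as in the upload sketch above
(again assuming placeholder storage_url and token values)::

    resp = requests.get(storage_url + "/cont/my-slo?multipart-manifest=get",
                        headers={"X-Auth-Token": token})
    manifest = json.loads(resp.text)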
When the manifest object is uploaded you are more or less guaranteed that
every segment in the manifest exists and matches the specification.
However, there is nothing that prevents the user from breaking the
SLO download by deleting/replacing a segment referenced in the manifest. It is
left to the user to use caution in handling the segments.
-----------------------
Deleting a Large Object
-----------------------
A DELETE request will just delete the manifest object itself.
A DELETE with a query parameter::
?multipart-manifest=delete
will delete all the segments referenced in the manifest and then the manifest
itself. The failure response will be similar to the bulk delete middleware.
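Again only as an illustrative sketch, with the same placeholder values as in
the upload example above::

    requests.delete(storage_url + "/cont/my-slo?multipart-manifest=delete",
                    headers={"X-Auth-Token": token})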
------------------------
Modifying a Large Object
------------------------
PUTs / POSTs will work as expected, PUTs will just overwrite the manifest
object for example.
------------------
Container Listings
------------------
In a container listing the size listed for SLO manifest objects will be the
total_size of the concatenated segments in the manifest. The overall
X-Container-Bytes-Used for the container (and subsequently for the account)
will not reflect total_size of the manifest but the actual size of the json
data stored. The reason for this somewhat confusing discrepancy is we want the
container listing to reflect the size of the manifest object when it is
downloaded. We do not, however, want to count the bytes-used twice (for both
the manifest and the segments it's referring to) in the container and account
metadata which can be used for stats purposes.
"""
from urllib import quote
from cStringIO import StringIO
from datetime import datetime
import mimetypes
from hashlib import md5
from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \
HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired, \
HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \
HTTPUnauthorized
from swift.common.utils import json, get_logger, config_true_value
from swift.common.constraints import check_utf8, MAX_BUFFERED_SLO_SEGMENTS
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
from swift.common.wsgi import WSGIContext
from swift.common.middleware.bulk import get_response_body, \
ACCEPTABLE_FORMATS, Bulk
def parse_input(raw_data):
"""
    Given the raw JSON body of a manifest PUT, parse it and return a list of segment dicts
:raises: HTTPException on parse errors
:returns: a list of dictionaries on success
"""
try:
parsed_data = json.loads(raw_data)
except ValueError:
raise HTTPBadRequest("Manifest must be valid json.")
req_keys = set(['path', 'etag', 'size_bytes'])
try:
for seg_dict in parsed_data:
if (set(seg_dict) != req_keys or
'/' not in seg_dict['path'].lstrip('/')):
raise HTTPBadRequest('Invalid SLO Manifest File')
except (AttributeError, TypeError):
raise HTTPBadRequest('Invalid SLO Manifest File')
return parsed_data
class SloContext(WSGIContext):
def __init__(self, slo, slo_etag):
WSGIContext.__init__(self, slo.app)
self.slo_etag = '"' + slo_etag.hexdigest() + '"'
def handle_slo_put(self, req, start_response):
app_resp = self._app_call(req.environ)
for i in xrange(len(self._response_headers)):
if self._response_headers[i][0].lower() == 'etag':
self._response_headers[i] = ('Etag', self.slo_etag)
break
start_response(self._response_status,
self._response_headers,
self._response_exc_info)
return app_resp
class StaticLargeObject(object):
"""
StaticLargeObject Middleware
See above for a full description.
The proxy logs created for any subrequests made will have swift.source set
to "SLO".
:param app: The next WSGI filter or app in the paste.deploy chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
self.conf = conf
self.app = app
self.logger = get_logger(conf, log_route='slo')
self.max_manifest_segments = int(self.conf.get('max_manifest_segments',
1000))
self.max_manifest_size = int(self.conf.get('max_manifest_size',
1024 * 1024 * 2))
self.min_segment_size = int(self.conf.get('min_segment_size',
1024 * 1024))
self.bulk_deleter = Bulk(app, {})
def handle_multipart_put(self, req, start_response):
"""
        Will handle the PUT of an SLO manifest.
        Heads every object in the manifest to check that it is valid and, if
        so, saves a manifest generated from the user input. Uses WSGIContext
        to call self.app and start_response and returns a WSGI iterator.
        :param req: a swob.Request with an obj in path
        :raises: HTTPException on errors
"""
try:
vrs, account, container, obj = req.split_path(1, 4, True)
except ValueError:
return self.app(req.environ, start_response)
if req.content_length > self.max_manifest_size:
raise HTTPRequestEntityTooLarge(
"Manifest File > %d bytes" % self.max_manifest_size)
if req.headers.get('X-Copy-From'):
raise HTTPMethodNotAllowed(
'Multipart Manifest PUTs cannot be Copy requests')
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
raise HTTPLengthRequired(request=req)
parsed_data = parse_input(req.body_file.read(self.max_manifest_size))
problem_segments = []
if len(parsed_data) > self.max_manifest_segments:
raise HTTPRequestEntityTooLarge(
                'Number of segments must be <= %d' % self.max_manifest_segments)
total_size = 0
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if not out_content_type:
out_content_type = 'text/plain'
data_for_storage = []
slo_etag = md5()
for index, seg_dict in enumerate(parsed_data):
obj_name = seg_dict['path']
if isinstance(obj_name, unicode):
obj_name = obj_name.encode('utf-8')
obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')])
try:
seg_size = int(seg_dict['size_bytes'])
except (ValueError, TypeError):
raise HTTPBadRequest('Invalid Manifest File')
if seg_size < self.min_segment_size and \
(index == 0 or index < len(parsed_data) - 1):
raise HTTPBadRequest(
'Each segment, except the last, must be larger than '
'%d bytes.' % self.min_segment_size)
new_env = req.environ.copy()
new_env['PATH_INFO'] = obj_path
new_env['REQUEST_METHOD'] = 'HEAD'
new_env['swift.source'] = 'SLO'
del(new_env['wsgi.input'])
del(new_env['QUERY_STRING'])
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
            headers = {'fingerprint': seg_dict['etag']}
            head_seg_resp = Request.blank(
                obj_path, headers=headers,
                environ=new_env).get_response(self.app)
if head_seg_resp.is_success:
total_size += seg_size
if seg_size != head_seg_resp.content_length:
problem_segments.append([quote(obj_name), 'Size Mismatch'])
if seg_dict['etag'] == head_seg_resp.etag:
slo_etag.update(seg_dict['etag'])
else:
problem_segments.append([quote(obj_name), 'Etag Mismatch'])
if head_seg_resp.last_modified:
last_modified = head_seg_resp.last_modified
else:
# shouldn't happen
last_modified = datetime.now()
last_modified_formatted = \
last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f')
seg_data = {'name': '/' + seg_dict['path'].lstrip('/'),
'bytes': seg_size,
'hash': seg_dict['etag'],
'content_type': head_seg_resp.content_type,
'last_modified': last_modified_formatted}
if config_true_value(
head_seg_resp.headers.get('X-Static-Large-Object')):
seg_data['sub_slo'] = True
data_for_storage.append(seg_data)
else:
problem_segments.append([quote(obj_name),
head_seg_resp.status])
if problem_segments:
resp_body = get_response_body(
out_content_type, {}, problem_segments)
raise HTTPBadRequest(resp_body, content_type=out_content_type)
env = req.environ
if not env.get('CONTENT_TYPE'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
env['CONTENT_TYPE'] = guessed_type or 'application/octet-stream'
env['swift.content_type_overriden'] = True
env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size
env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True'
json_data = json.dumps(data_for_storage)
env['CONTENT_LENGTH'] = str(len(json_data))
env['wsgi.input'] = StringIO(json_data)
slo_context = SloContext(self, slo_etag)
return slo_context.handle_slo_put(req, start_response)
def get_segments_to_delete_iter(self, req):
"""
A generator function to be used to delete all the segments and
sub-segments referenced in a manifest.
        :param req: a swob.Request with an SLO manifest in path
:raises HTTPPreconditionFailed: on invalid UTF8 in request path
:raises HTTPBadRequest: on too many buffered sub segments and
on invalid SLO manifest path
"""
if not check_utf8(req.path_info):
raise HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
vrs, account, container, obj = req.split_path(4, 4, True)
except ValueError:
            raise HTTPBadRequest('Invalid SLO manifest path')
segments = [{
'sub_slo': True,
'name': ('/%s/%s' % (container, obj)).decode('utf-8')}]
while segments:
if len(segments) > MAX_BUFFERED_SLO_SEGMENTS:
raise HTTPBadRequest(
'Too many buffered slo segments to delete.')
seg_data = segments.pop(0)
if seg_data.get('sub_slo'):
try:
segments.extend(
self.get_slo_segments(seg_data['name'], req))
except HTTPException as err:
# allow bulk delete response to report errors
seg_data['error'] = {'code': err.status_int,
'message': err.body}
# add manifest back to be deleted after segments
seg_data['sub_slo'] = False
segments.append(seg_data)
else:
seg_data['name'] = seg_data['name'].encode('utf-8')
yield seg_data
def get_slo_segments(self, obj_name, req):
"""
Performs a swob.Request and returns the SLO manifest's segments.
:raises HTTPServerError: on unable to load obj_name or
on unable to load the SLO manifest data.
:raises HTTPBadRequest: on not an SLO manifest
:raises HTTPNotFound: on SLO manifest not found
:returns: SLO manifest's segments
"""
vrs, account, _junk = req.split_path(2, 3, True)
new_env = req.environ.copy()
new_env['REQUEST_METHOD'] = 'GET'
del(new_env['wsgi.input'])
new_env['QUERY_STRING'] = 'multipart-manifest=get'
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s MultipartDELETE' % new_env.get('HTTP_USER_AGENT')
new_env['swift.source'] = 'SLO'
new_env['PATH_INFO'] = (
'/%s/%s/%s' % (
vrs, account,
obj_name.lstrip('/'))).encode('utf-8')
resp = Request.blank('', new_env).get_response(self.app)
if resp.is_success:
if config_true_value(resp.headers.get('X-Static-Large-Object')):
try:
return json.loads(resp.body)
except ValueError:
raise HTTPServerError('Unable to load SLO manifest')
else:
raise HTTPBadRequest('Not an SLO manifest')
elif resp.status_int == HTTP_NOT_FOUND:
raise HTTPNotFound('SLO manifest not found')
elif resp.status_int == HTTP_UNAUTHORIZED:
raise HTTPUnauthorized('401 Unauthorized')
else:
raise HTTPServerError('Unable to load SLO manifest or segment.')
def handle_multipart_delete(self, req):
"""
Will delete all the segments in the SLO manifest and then, if
successful, will delete the manifest file.
        :param req: a swob.Request with an obj in path
:returns: swob.Response whose app_iter set to Bulk.handle_delete_iter
"""
resp = HTTPOk(request=req)
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if out_content_type:
resp.content_type = out_content_type
resp.app_iter = self.bulk_deleter.handle_delete_iter(
req, objs_to_delete=self.get_segments_to_delete_iter(req),
user_agent='MultipartDELETE', swift_source='SLO',
out_content_type=out_content_type)
return resp
def __call__(self, env, start_response):
"""
WSGI entry point
"""
req = Request(env)
try:
vrs, account, container, obj = req.split_path(1, 4, True)
except ValueError:
return self.app(env, start_response)
try:
if obj:
if req.method == 'PUT' and \
req.params.get('multipart-manifest') == 'put':
return self.handle_multipart_put(req, start_response)
if req.method == 'DELETE' and \
req.params.get('multipart-manifest') == 'delete':
return self.handle_multipart_delete(req)(env,
start_response)
if 'X-Static-Large-Object' in req.headers:
raise HTTPBadRequest(
request=req,
body='X-Static-Large-Object is a reserved header. '
'To create a static large object add query param '
'multipart-manifest=put.')
except HTTPException as err_resp:
return err_resp(env, start_response)
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def slo_filter(app):
return StaticLargeObject(app, conf)
return slo_filter
| lielongxingkong/windchimes | swift/common/middleware/slo.py | Python | apache-2.0 | 19,534 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from turnstile.checks import get_checks
from turnstile.manager import get_commands
CORE_COMMIT_MSG_CHECKS = ['branch_pattern', 'branch_release', 'branch_type', 'protect_master', 'specification']
CORE_SUBCOMMANDS = ['config', 'install', 'remove', 'specification', 'upgrade', 'version']
def test_checks():
checks = dict(get_checks('commit_msg'))
for check_name in CORE_COMMIT_MSG_CHECKS:
assert check_name in checks
def test_subcommands():
subcommands = dict(get_commands())
for subcommand_name in CORE_SUBCOMMANDS:
assert subcommand_name in subcommands
| zalando/turnstile | tests/test_discovery.py | Python | apache-2.0 | 636 |
import gevent
import socket
from pprint import pformat
from vnc_api.vnc_api import *
from cfgm_common.vnc_kombu import VncKombuClient
from config_db import *
from cfgm_common.dependency_tracker import DependencyTracker
from reaction_map import REACTION_MAP
import svc_monitor
class RabbitConnection(object):
_REACTION_MAP = REACTION_MAP
def __init__(self, logger, args=None):
self._args = args
self.logger = logger
def _connect_rabbit(self):
rabbit_server = self._args.rabbit_server
rabbit_port = self._args.rabbit_port
rabbit_user = self._args.rabbit_user
rabbit_password = self._args.rabbit_password
rabbit_vhost = self._args.rabbit_vhost
rabbit_ha_mode = self._args.rabbit_ha_mode
self._db_resync_done = gevent.event.Event()
q_name = 'svc_mon.%s' % (socket.gethostname())
self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode,
q_name, self._vnc_subscribe_callback,
self.logger.log)
def _vnc_subscribe_callback(self, oper_info):
self._db_resync_done.wait()
try:
self._vnc_subscribe_actions(oper_info)
except Exception:
svc_monitor.cgitb_error_log(self)
def _vnc_subscribe_actions(self, oper_info):
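        # Expected notification shape, inferred from the handling below
        # (only the fields this method actually accesses are listed):
        #   {'oper': 'CREATE'|'UPDATE'|'DELETE',
        #    'type': '<object type, e.g. service-instance>',
        #    'uuid': '<object uuid>',
        #    'obj_dict': {...}}  # present for CREATE notifications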
msg = "Notification Message: %s" % (pformat(oper_info))
self.logger.log_debug(msg)
obj_type = oper_info['type'].replace('-', '_')
obj_class = DBBaseSM.get_obj_type_map().get(obj_type)
if obj_class is None:
return
if oper_info['oper'] == 'CREATE':
obj_dict = oper_info['obj_dict']
obj_id = oper_info['uuid']
obj = obj_class.locate(obj_id)
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
elif oper_info['oper'] == 'UPDATE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
old_dt = None
if obj is not None:
old_dt = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
old_dt.evaluate(obj_type, obj)
else:
obj = obj_class.locate(obj_id)
obj.update()
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
if old_dt:
for resource, ids in old_dt.resources.items():
if resource not in dependency_tracker.resources:
dependency_tracker.resources[resource] = ids
else:
dependency_tracker.resources[resource] = list(
set(dependency_tracker.resources[resource]) |
set(ids))
elif oper_info['oper'] == 'DELETE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
if obj is None:
return
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
obj_class.delete(obj_id)
else:
# unknown operation
self.logger.log_error('Unknown operation %s' % oper_info['oper'])
return
if obj is None:
self.logger.log_error('Error while accessing %s uuid %s' % (
obj_type, obj_id))
return
for res_type, res_id_list in dependency_tracker.resources.items():
if not res_id_list:
continue
cls = DBBaseSM.get_obj_type_map().get(res_type)
if cls is None:
continue
for res_id in res_id_list:
res_obj = cls.get(res_id)
if res_obj is not None:
res_obj.evaluate()
| sajuptpm/contrail-controller | src/config/svc-monitor/svc_monitor/rabbit.py | Python | apache-2.0 | 4,079 |
# Copyright 2014 - Mirantis, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from mistral.db.v2 import api as db_api
from mistral.lang import parser as spec_parser
from mistral.services import workbooks as wb_service
from mistral.tests.unit import base
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
WORKBOOK = """
---
version: '2.0'
name: my_wb
tags: [test]
actions:
concat:
base: std.echo
base-input:
output: "{$.str1}{$.str2}"
workflows:
wf1:
#Sample Comment 1
type: reverse
tags: [wf_test]
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
wf2:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf1 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
"""
WORKBOOK_WF1_DEFINITION = """wf1:
#Sample Comment 1
type: reverse
tags: [wf_test]
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
"""
WORKBOOK_WF2_DEFINITION = """wf2:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf1 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
"""
UPDATED_WORKBOOK = """
---
version: '2.0'
name: my_wb
tags: [test]
actions:
concat:
base: std.echo
base-input:
output: "{$.str1}{$.str2}"
workflows:
wf1:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf2 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
wf2:
type: reverse
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
"""
UPDATED_WORKBOOK_WF1_DEFINITION = """wf1:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf2 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
"""
UPDATED_WORKBOOK_WF2_DEFINITION = """wf2:
type: reverse
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
"""
ACTION_DEFINITION = """concat:
base: std.echo
base-input:
output: "{$.str1}{$.str2}"
"""
class WorkbookServiceTest(base.DbTestCase):
def test_create_workbook(self):
namespace = 'test_workbook_service_0123_namespace'
wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace)
self.assertIsNotNone(wb_db)
self.assertEqual('my_wb', wb_db.name)
self.assertEqual(namespace, wb_db.namespace)
self.assertEqual(WORKBOOK, wb_db.definition)
self.assertIsNotNone(wb_db.spec)
self.assertListEqual(['test'], wb_db.tags)
db_actions = db_api.get_action_definitions(
name='my_wb.concat',
namespace=namespace
)
self.assertEqual(1, len(db_actions))
# Action.
action_db = self._assert_single_item(db_actions, name='my_wb.concat')
self.assertFalse(action_db.is_system)
action_spec = spec_parser.get_action_spec(action_db.spec)
self.assertEqual('concat', action_spec.get_name())
self.assertEqual('std.echo', action_spec.get_base())
self.assertEqual(ACTION_DEFINITION, action_db.definition)
db_wfs = db_api.get_workflow_definitions()
self.assertEqual(2, len(db_wfs))
# Workflow 1.
wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1')
wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec)
self.assertEqual('wf1', wf1_spec.get_name())
self.assertEqual('reverse', wf1_spec.get_type())
self.assertListEqual(['wf_test'], wf1_spec.get_tags())
self.assertListEqual(['wf_test'], wf1_db.tags)
self.assertEqual(namespace, wf1_db.namespace)
self.assertEqual(WORKBOOK_WF1_DEFINITION, wf1_db.definition)
# Workflow 2.
wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2')
wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec)
self.assertEqual('wf2', wf2_spec.get_name())
self.assertEqual('direct', wf2_spec.get_type())
self.assertEqual(namespace, wf2_db.namespace)
self.assertEqual(WORKBOOK_WF2_DEFINITION, wf2_db.definition)
def test_create_same_workbook_in_different_namespaces(self):
first_namespace = 'first_namespace'
second_namespace = 'second_namespace'
first_wb = wb_service.create_workbook_v2(WORKBOOK,
namespace=first_namespace)
self.assertIsNotNone(first_wb)
self.assertEqual('my_wb', first_wb.name)
self.assertEqual(first_namespace, first_wb.namespace)
second_wb = wb_service.create_workbook_v2(WORKBOOK,
namespace=second_namespace)
self.assertIsNotNone(second_wb)
self.assertEqual('my_wb', second_wb.name)
self.assertEqual(second_namespace, second_wb.namespace)
def test_create_workbook_with_default_namespace(self):
wb_db = wb_service.create_workbook_v2(WORKBOOK)
self.assertIsNotNone(wb_db)
self.assertEqual('my_wb', wb_db.name)
self.assertEqual('', wb_db.namespace)
db_api.delete_workbook('my_wb')
def test_update_workbook(self):
namespace = 'test_workbook_service_0123_namespace'
# Create workbook.
wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace)
self.assertIsNotNone(wb_db)
self.assertEqual(2, len(db_api.get_workflow_definitions()))
# Update workbook.
wb_db = wb_service.update_workbook_v2(
UPDATED_WORKBOOK,
namespace=namespace
)
self.assertIsNotNone(wb_db)
self.assertEqual('my_wb', wb_db.name)
self.assertEqual(namespace, wb_db.namespace)
self.assertEqual(UPDATED_WORKBOOK, wb_db.definition)
self.assertListEqual(['test'], wb_db.tags)
db_wfs = db_api.get_workflow_definitions()
self.assertEqual(2, len(db_wfs))
# Workflow 1.
wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1')
wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec)
self.assertEqual('wf1', wf1_spec.get_name())
self.assertEqual('direct', wf1_spec.get_type())
self.assertEqual(namespace, wf1_db.namespace)
self.assertEqual(UPDATED_WORKBOOK_WF1_DEFINITION, wf1_db.definition)
# Workflow 2.
wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2')
wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec)
self.assertEqual('wf2', wf2_spec.get_name())
self.assertEqual('reverse', wf2_spec.get_type())
self.assertEqual(namespace, wf2_db.namespace)
self.assertEqual(UPDATED_WORKBOOK_WF2_DEFINITION, wf2_db.definition)
def test_delete_workbook(self):
namespace = 'pqr'
# Create workbook.
wb_service.create_workbook_v2(WORKBOOK, namespace=namespace)
db_wfs = db_api.get_workflow_definitions()
db_actions = db_api.get_action_definitions(name='my_wb.concat',
namespace=namespace)
self.assertEqual(2, len(db_wfs))
self.assertEqual(1, len(db_actions))
db_api.delete_workbook('my_wb', namespace=namespace)
db_wfs = db_api.get_workflow_definitions()
db_actions = db_api.get_action_definitions(name='my_wb.concat',
namespace=namespace)
# Deleting workbook shouldn't delete workflows and actions
self.assertEqual(2, len(db_wfs))
self.assertEqual(1, len(db_actions))
| openstack/mistral | mistral/tests/unit/services/test_workbook_service.py | Python | apache-2.0 | 8,847 |
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import rpc
from neutron_lib.agent import topics
class Tunnel_Calls(object):
"""Common tunnel calls for L2 agent."""
def __init__(self):
self._construct_rpc_stuff()
def _construct_rpc_stuff(self):
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.type_manager = managers.TypeManager()
self.tunnel_rpc_obj = rpc.RpcCallbacks(self.notifier,
self.type_manager)
def trigger_tunnel_sync(self, context, tunnel_ip):
"""Sends tunnel sync RPC message to the neutron
L2 agent.
"""
tunnel_dict = {'tunnel_ip': tunnel_ip,
'tunnel_type': 'vxlan'}
self.tunnel_rpc_obj.tunnel_sync(context,
**tunnel_dict)
def trigger_l2pop_sync(self, context, other_fdb_entries):
"""Sends L2pop ADD RPC message to the neutron L2 agent."""
l2pop_rpc.L2populationAgentNotifyAPI(
).add_fdb_entries(context, other_fdb_entries)
def trigger_l2pop_delete(self, context, other_fdb_entries, host=None):
"""Sends L2pop DELETE RPC message to the neutron L2 agent."""
l2pop_rpc.L2populationAgentNotifyAPI(
).remove_fdb_entries(context, other_fdb_entries, host)
| openstack/networking-l2gw | networking_l2gw/services/l2gateway/common/tunnel_calls.py | Python | apache-2.0 | 2,017 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"WebpageConditionOperatorEnum",},
)
class WebpageConditionOperatorEnum(proto.Message):
r"""Container for enum describing webpage condition operator in
webpage criterion.
"""
class WebpageConditionOperator(proto.Enum):
r"""The webpage condition operator in webpage criterion."""
UNSPECIFIED = 0
UNKNOWN = 1
EQUALS = 2
CONTAINS = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v10/enums/types/webpage_condition_operator.py | Python | apache-2.0 | 1,199 |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Big tensor games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import misc
class TensorGame(object):
"""Tensor Game."""
def __init__(self, pt, seed=None):
"""Ctor. Inits payoff tensor (players x actions x ... np.array).
Args:
pt: payoff tensor, np.array
seed: seed for random number generator, used if computing best responses
"""
if np.any(pt < 0.):
raise ValueError("Payoff tensor must contain non-negative values")
self.pt = pt
self.seed = seed
self.random = np.random.RandomState(seed)
def num_players(self):
return self.pt.shape[0]
def num_strategies(self):
return self.pt.shape[1:]
def payoff_tensor(self):
return self.pt
def get_payoffs_for_strategies(self, policies):
"""Return vector of payoffs for all players given list of strategies.
Args:
policies: list of integers indexing strategies for each player
Returns:
np.array (length num players) of payoffs
"""
return self.pt[tuple([slice(None)] + policies)]
def best_response(self, mixed_strategy, return_exp=False):
"""Return best response and its superiority over the current strategy.
Args:
mixed_strategy: np.ndarray (distribution over strategies)
return_exp: bool, whether to return how much best response exploits the
given mixed strategy (default is False)
Returns:
br: int, index of strategy (ties split randomly)
exp: u(br) - u(mixed_strategy)
"""
logging.warn("Assumes symmetric game! Returns br for player 0.")
gradient = misc.pt_reduce(self.pt[0],
[mixed_strategy] * self.num_players(),
[0])
br = misc.argmax(self.random, gradient)
exp = gradient.max() - gradient.dot(mixed_strategy)
if return_exp:
return br, exp
else:
return br
def best_population_response(self, dist, policies):
"""Returns the best response to the current population of policies.
Args:
dist: np.ndarray, distribution over policies
policies: list of integers indexing strategies for each player
Returns:
best response, exploitability tuple (see best_response)
"""
ns = self.num_strategies()
mixed_strat = np.zeros(ns)
for pure_strat, prob in zip(policies, dist):
mixed_strat[pure_strat] += prob
return self.best_response(mixed_strat)
class ElFarol(TensorGame):
"""N-Player, 2-Action symmetric game with unique symmetric Nash."""
def __init__(self, n=2, c=0.5, B=0, S=1, G=2, seed=None):
"""Ctor. Initializes payoff tensor (N x (2,) * N np.array).
See Section 3.1, The El Farol Stage Game in
http://www.econ.ed.ac.uk/papers/id186_esedps.pdf
action 0: go to bar
action 1: avoid bar
Args:
n: int, number of players
c: float, threshold for `crowded' as a fraction of number of players
B: float, payoff for going to a crowded bar
S: float, payoff for staying at home
G: float, payoff for going to an uncrowded bar
seed: seed for random number generator, used if computing best responses
"""
assert G > S > B, "Game parameters must satisfy G > S > B."
pt = np.zeros((n,) + (2,) * n)
for idx in np.ndindex(pt.shape):
p = idx[0]
a = idx[1:]
a_i = a[p]
go_to_bar = (a_i < 1)
crowded = (n - 1 - sum(a) + a_i) >= (c * n)
if go_to_bar and not crowded:
pt[idx] = G
elif go_to_bar and crowded:
pt[idx] = B
else:
pt[idx] = S
super().__init__(pt, seed)
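# Illustrative usage sketch, not part of the original module: build a small
# El Farol game and query a best response to the uniform mixed strategy.
if __name__ == "__main__":
  game = ElFarol(n=3, c=0.5)
  uniform = np.ones(2) / 2.
  br, exp = game.best_response(uniform, return_exp=True)
  print("best response:", br, "exploitability:", exp)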
| deepmind/open_spiel | open_spiel/python/algorithms/adidas_utils/games/big.py | Python | apache-2.0 | 4,268 |
""" environ settings """
import environ
# Project base directory: four path components up from this settings file.
BASE_DIR = environ.Path(__file__) - 4
ENV_VAR = environ.Env()
| kaizentech/skeleton | config/settings/configurations/ENV.py | Python | apache-2.0 | 103 |
import os
from library.connecter.ansible.yaml import Yaml_Base
class Data_DB(Yaml_Base):
def router(self, content, name, yaml_tpye='main', file_type='tasks', preserve=True, together=False, describe=''):
        '''
        Check whether the yaml data is syntactically valid; any include and/or
        roles it references are expected to be stored in the backend database.
        :parameters
            content: the yaml content
            name: the name under which the yaml content is stored in the database
            yaml_tpye: one of full_roles, main, include or roles
            file_type: the file type, used for include content
            preserve: whether to write the content to the database
            together: whether to also return the contents of every file under this main
            describe: a description stored together with the yaml content
        :return
            a tuple whose first element is the execution result:
            on success: (True, file content (as a dict))
            on failure: (False, the reason for the failure)
        '''
if yaml_tpye in ('full_roles' , 'main') :
result = self.main(content, name, preserve=preserve, together=together, describe=describe)
elif yaml_tpye == 'include' :
result = self.include(content, name, file_type=file_type, preserve=preserve, describe=describe)
elif yaml_tpye == 'roles' :
result = self.roles(content, name, preserve=preserve, together=together, describe=describe)
else :
self.logger.error('动作:检测yaml数据的语法是否正确并将把这些存储在后端数据库中,执行结果:失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')
return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')
return result
def main(self, content, name, preserve=True, together=False, describe=''):
        '''
        Check whether the main content is syntactically valid; any include
        and/or roles it references are assumed to be stored in the backend
        database.
        :parameters
            content: the yaml content
            name: the name under which the yaml content is stored in the database
            preserve: whether to write the content to the database
            together: whether to also return the contents of every file under this main
            describe: a description stored together with the yaml content
        :return
            a tuple whose first element is the execution result:
            on success: (True, file content (as a dict))
            on failure: (False, the reason for the failure)
        '''
result = self.yaml_loader(content, data_type='data')
if result[0] :
(content, yaml_data) = result[2:]
else :
self.logger.error('检测yaml数据名为' + name + '类型为full_roles或者main的语法失败,转化成yaml数据时失败,原因:' + result[1])
return (False, '转化成yaml数据时失败,' + result[1])
result = self.check_main(yaml_data)
self.logger.error(result)
if result[0] :
(roles_list, includefile_dict) = result[1:]
else :
self.logger.error('检测yaml数据名为' + name + '类型为full_roles或者main的语法失败,未通过yaml语法检测,原因:' + result[1])
return (False, '未通过yaml语法检测,' + result[1])
include_content_dict = {}
roles_content_dict = {}
for file in includefile_dict :
result = self.read2db(file, word_field='name')
if not result[0] :
self.logger.error('检测yaml数据名为' + name + '类型为full_roles或者main的语法失败,名为' + file + '的include查询出错,原因:' + result[1])
return (False, '名为' + file + '的include查询出错,' + result[1])
else :
try :
include_content = result[1]['content']
include_content_dict.update({file:include_content})
except :
self.logger.error('检测yaml数据名为' + name + '类型为full_roles或者main的语法失败,名为' + file + '的include查询出错,原因:查询结果不含content字段')
return (False, '名为' + file + '的include查询出错,查询结果不含content字段')
for roles in roles_list :
result = self.read2db(roles, word_field='name')
if result[0] :
try :
content_dict = result[1]['content']
if 'include' in content_dict :
                        include_content_dict.update(content_dict['include'])
roles_content_dict.update({roles:content_dict['roles']})
except :
self.logger.error('检测yaml数据名为' + name + '类型为full_roles或者main的语法失败,名为' + roles + '的roles查询出错,查询结果不含content字段')
return (False, '名为' + roles + '的roles查询出错,查询结果不含content字段')
else :
return (False, '名为' + roles + '的roles查询出错,' + result[1])
data = {
'main' : content,
'include': include_content_dict,
'roles': roles_content_dict,
}
if preserve :
result = self.write2db(name, data, 'main', describe=describe)
if not result[0] :
self.logger.error('检测yaml数据名为' + name + '类型为full_roles或者main的语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1])
return (False, '通过yaml语法检测,但无法写入数据库' + result[1])
self.logger.info('检测yaml数据名为' + name + '类型为full_roles或者main语法成功')
if together :
return (True, data)
else :
return (True, content)
def include(self, content, name, file_type='main', preserve=True, describe=''):
        '''
        Check whether the include content is syntactically valid.
        :parameters
            content: the yaml content
            name: the name under which the yaml content is stored in the database
            file_type: the type of file this include belongs to
            preserve: whether to write the content to the database
            describe: a description stored together with the yaml content
        :return
            a tuple whose first element is the execution result:
            on success: (True, file content (as a dict))
            on failure: (False, the reason for the failure)
        '''
result = self.yaml_loader(content, data_type='data')
if result[0] :
(content, yaml_data) = result[2:]
else :
self.logger.error('检测yaml数据名为' + name + '类型为include的语法失败,转化成yaml数据时失败,原因:' + result[1])
return (False, '转化成yaml数据时失败,' + result[1])
result = self.check_include(yaml_data, file_type=file_type)
if not result[0] :
self.logger.error('检测yaml数据名为' + name + '类型为include的语法失败,未通过yaml语法检测,原因:' + result[1])
return (False, '未通过yaml语法检测,' + result[1])
if preserve :
result = self.write2db(name, content, 'include', describe=describe)
if not result[0] :
self.logger.error('检测yaml数据名为' + name + '类型为include的语法失败,但无法写入数据库,原因:' + result[1])
return (False, '通过yaml语法检测,但无法写入数据库' + result[1])
self.logger.info('检测yaml数据名为' + name + '类型为include语法成功')
return (True, content)
def roles(self, content, name, preserve=True, together=False, describe=''):
        '''
        Check whether the roles content is syntactically valid; any include it
        references is expected to be stored in the backend database.
        :parameters
            content: the roles content, as a dict
            name: the name under which the content is stored in the database
            preserve: whether to write the content to the database
            together: whether to also return the contents of every file under these roles
            describe: a description stored together with the content
        :return
            a tuple whose first element is the execution result:
            on success: (True, file content (as a dict))
            on failure: (False, the reason for the failure)
        '''
content_dict = {}
result = self._isrolesname(name)
if not result :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,未通过语法检测,原因:roles名不符合本系统要求的,注:虽然原生ansible支持这样写')
return (False, '未通过yaml语法检测,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')
if not isinstance(content, dict) :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,未通过语法检测,原因:参数content必须是字典格式')
self.logger.error('roles名为' + str(name) + '未通过语法检测,原因:参数content必须是字典格式')
return (False, '未通过yaml语法检测,参数content必须是字典格式')
result = self.check_roles(content)
include_content_dict = {}
if result[0] :
includefile_dict = result[1]
for file in includefile_dict:
result = self.read2db(file, word_field='name')
if not result[0] :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,未通过语法检测,原因:' + 'include名为' + file + '的include查询出错,' + result[1])
return (False, '未通过yaml语法检测,名为' + file + '的include查询出错,' + result[1])
else :
try :
include_content = result[1]['content']
include_content_dict.update({file:include_content})
except :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,未通过语法检测,原因:' + '名为' + file + '的include查询出错,查询结果不含content关键字段')
return (False, '未通过yaml语法检测,名为' + file + '的include查询出错,查询结果不含content关键字段')
else :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,未通过yaml语法检测,语法错误,原因:' + result[1])
return (False, '未通过yaml语法检测,语法错误,' + result[1])
if 'templates' in content :
temp_content = content['templates']
if not isinstance(temp_content, dict) :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,未通过yaml语法检测,templates查询错误,查询结果的数据类型不为字典')
return (False, '未通过yaml语法检测,templates查询错误,查询结果的数据类型不为字典')
content_dict['templates'] = {}
for temp_file , tempfile_content in temp_content.items() :
temp_file = os.path.basename(temp_file)
content_dict['templates'][temp_file] = tempfile_content
data = {
'main' : {},
'include': include_content_dict,
'roles': content_dict,
}
if preserve :
result = self.write2db(name, data, 'roles', describe=describe)
if not result[0] :
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法失败,通过yaml语法检测,无法写入数据库,' + result[1])
return (False, '通过yaml语法检测,无法写入数据库,' + result[1])
self.logger.error('检测yaml数据名为' + name + '类型为roles的语法成功')
if together :
            return (True, content_dict, include_content_dict)
else :
return (True, {}, {})
| lykops/lykops | library/connecter/ansible/yaml/data2db.py | Python | apache-2.0 | 12,807 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from devstack import cfg
from devstack import component as comp
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.components import db
LOG = logging.getLogger("devstack.components.quantum")
# Openvswitch special settings
VSWITCH_PLUGIN = 'openvswitch'
V_PROVIDER = "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
# Config files (some only modified if running as openvswitch)
PLUGIN_CONF = "plugins.ini"
QUANTUM_CONF = 'quantum.conf'
PLUGIN_LOC = ['etc']
AGENT_CONF = 'ovs_quantum_plugin.ini'
AGENT_LOC = ["etc", "quantum", "plugins", "openvswitch"]
AGENT_BIN_LOC = ["quantum", "plugins", "openvswitch", 'agent']
CONFIG_FILES = [PLUGIN_CONF, AGENT_CONF]
# This db will be dropped and created
DB_NAME = 'ovs_quantum'
# Opensvswitch bridge setup/teardown/name commands
OVS_BRIDGE_DEL = ['ovs-vsctl', '--no-wait', '--', '--if-exists', 'del-br', '%OVS_BRIDGE%']
OVS_BRIDGE_ADD = ['ovs-vsctl', '--no-wait', 'add-br', '%OVS_BRIDGE%']
OVS_BRIDGE_EXTERN_ID = ['ovs-vsctl', '--no-wait', 'br-set-external-id', '%OVS_BRIDGE%', 'bridge-id', '%OVS_EXTERNAL_ID%']
OVS_BRIDGE_CMDS = [OVS_BRIDGE_DEL, OVS_BRIDGE_ADD, OVS_BRIDGE_EXTERN_ID]
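# For illustration, with OVS_BRIDGE=br-int and OVS_EXTERNAL_ID=br-int the
# templates above expand to roughly:
#   ovs-vsctl --no-wait -- --if-exists del-br br-int
#   ovs-vsctl --no-wait add-br br-int
#   ovs-vsctl --no-wait br-set-external-id br-int bridge-id br-int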
# Subdirs of the downloaded source checkout
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
# What to start (only if openvswitch enabled)
APP_Q_SERVER = 'quantum-server'
APP_Q_AGENT = 'ovs_quantum_agent.py'
APP_OPTIONS = {
APP_Q_SERVER: ["%QUANTUM_CONFIG_FILE%"],
APP_Q_AGENT: ["%OVS_CONFIG_FILE%", "-v"],
}
class QuantumUninstaller(comp.PkgUninstallComponent):
def __init__(self, *args, **kargs):
comp.PkgUninstallComponent.__init__(self, *args, **kargs)
class QuantumInstaller(comp.PkgInstallComponent):
def __init__(self, *args, **kargs):
comp.PkgInstallComponent.__init__(self, *args, **kargs)
self.q_vswitch_agent = False
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
self.q_vswitch_agent = True
self.q_vswitch_service = True
def _get_download_locations(self):
places = list()
places.append({
'uri': ("git", "quantum_repo"),
'branch': ("git", "quantum_branch"),
})
return places
def known_options(self):
return set(['no-ovs-db-init', 'no-ovs-bridge-init'])
def _get_config_files(self):
return list(CONFIG_FILES)
def _get_target_config_name(self, config_fn):
if config_fn == PLUGIN_CONF:
tgt_loc = [self.app_dir] + PLUGIN_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
elif config_fn == AGENT_CONF:
tgt_loc = [self.app_dir] + AGENT_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
else:
return comp.PkgInstallComponent._get_target_config_name(self, config_fn)
def _config_adjust(self, contents, config_fn):
if config_fn == PLUGIN_CONF and self.q_vswitch_service:
# Need to fix the "Quantum plugin provider module"
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
provider = config.get("PLUGIN", "provider")
if provider != V_PROVIDER:
config.set("PLUGIN", "provider", V_PROVIDER)
with io.BytesIO() as outputstream:
config.write(outputstream)
outputstream.flush()
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
elif config_fn == AGENT_CONF and self.q_vswitch_agent:
# Need to adjust the sql connection
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
db_dsn = config.get("DATABASE", "sql_connection")
if db_dsn:
generated_dsn = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
if generated_dsn != db_dsn:
config.set("DATABASE", "sql_connection", generated_dsn)
with io.BytesIO() as outputstream:
config.write(outputstream)
outputstream.flush()
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
else:
return comp.PkgInstallComponent._config_adjust(self, contents, config_fn)
def _setup_bridge(self):
if not self.q_vswitch_agent or \
'no-ovs-bridge-init' in self.options:
return
bridge = self.cfg.getdefaulted("quantum", "ovs_bridge", 'br-int')
LOG.info("Fixing up ovs bridge named %s.", bridge)
external_id = self.cfg.getdefaulted("quantum", 'ovs_bridge_external_name', bridge)
params = dict()
params['OVS_BRIDGE'] = bridge
params['OVS_EXTERNAL_ID'] = external_id
cmds = list()
for cmd_templ in OVS_BRIDGE_CMDS:
cmds.append({
'cmd': cmd_templ,
'run_as_root': True,
})
utils.execute_template(*cmds, params=params)
def post_install(self):
comp.PkgInstallComponent.post_install(self)
self._setup_db()
self._setup_bridge()
def _setup_db(self):
if not self.q_vswitch_service or \
'no-ovs-db-init' in self.options:
return
LOG.info("Fixing up database named %s.", DB_NAME)
db.drop_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
db.create_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
def _get_source_config(self, config_fn):
if config_fn == PLUGIN_CONF:
srcloc = [self.app_dir] + PLUGIN_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
elif config_fn == AGENT_CONF:
srcloc = [self.app_dir] + AGENT_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
else:
return comp.PkgInstallComponent._get_source_config(self, config_fn)
class QuantumRuntime(comp.ProgramRuntime):
def __init__(self, *args, **kargs):
comp.ProgramRuntime.__init__(self, *args, **kargs)
self.q_vswitch_agent = False
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
# Default to on if not specified
self.q_vswitch_agent = True
self.q_vswitch_service = True
def _get_apps_to_start(self):
app_list = comp.ProgramRuntime._get_apps_to_start(self)
if self.q_vswitch_service:
app_list.append({
'name': APP_Q_SERVER,
'path': sh.joinpths(self.app_dir, BIN_DIR, APP_Q_SERVER),
})
if self.q_vswitch_agent:
full_pth = [self.app_dir] + AGENT_BIN_LOC + [APP_Q_AGENT]
app_list.append({
'name': APP_Q_AGENT,
'path': sh.joinpths(*full_pth)
})
return app_list
def _get_app_options(self, app_name):
return APP_OPTIONS.get(app_name)
def _get_param_map(self, app_name):
param_dict = comp.ProgramRuntime._get_param_map(self, app_name)
if app_name == APP_Q_AGENT:
tgt_loc = [self.app_dir] + AGENT_LOC + [AGENT_CONF]
param_dict['OVS_CONFIG_FILE'] = sh.joinpths(*tgt_loc)
elif app_name == APP_Q_SERVER:
param_dict['QUANTUM_CONFIG_FILE'] = sh.joinpths(self.app_dir, CONFIG_DIR, QUANTUM_CONF)
return param_dict
| hagleitn/Openstack-Devstack2 | devstack/components/quantum.py | Python | apache-2.0 | 8,662 |
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_constants
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
target = oslo_messaging.Target(version='1.1')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
mac = port['mac_address']
pci_slot = None
if port.get('binding:profile'):
pci_slot = port['binding:profile'].get('pci_slot')
if pci_slot:
self.agent.updated_devices.add((mac, pci_slot))
LOG.debug("port_update RPC received for port: %(id)s with MAC "
"%(mac)s and PCI slot %(pci_slot)s slot",
{'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
"skipping", {'id': port['id'], 'mac': mac,
'pci_slot': pci_slot})
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval):
self.polling_interval = polling_interval
self.conf = cfg.CONF
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
configurations = {'device_mappings': physical_devices_mappings}
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': self.conf.host,
'topic': n_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
'start_flag': True}
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.mac_to_port_id_mapping = {}
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc)
self._setup_rpc()
self.ext_manager = self._create_agent_extension_manager(
self.connection)
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
devices = len(self.eswitch_mgr.get_assigned_devices_info())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _create_agent_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
mgr = ext_manager.AgentExtensionsManager(self.conf)
mgr.initialize(connection, 'sriov')
return mgr
def setup_eswitch_mgr(self, device_mappings, exclude_devices={}):
self.eswitch_mgr = esm.ESwitchManager()
self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)
def scan_devices(self, registered_devices, updated_devices):
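"""Scans assigned devices and computes the added/updated/removed sets.
Illustrative sketch (device tuples are hypothetical): with
registered_devices = {A, B}, currently assigned devices = {B, C} and
updated_devices = {B, D}, the result is added = {C}, updated = {B}
(only devices that still exist) and removed = {A}.
"""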
curr_devices = self.eswitch_mgr.get_assigned_devices_info()
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = updated_devices & curr_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
return device_info
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
if self.eswitch_mgr.device_exists(device, pci_slot):
try:
self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
spoofcheck)
except Exception:
LOG.warning(_LW("Failed to set spoofcheck for device %s"),
device)
LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
{"device": device, "spoofcheck": spoofcheck})
try:
self.eswitch_mgr.set_device_state(device, pci_slot,
admin_state_up)
except exc.SriovNicError:
LOG.exception(_LE("Failed to set device %s state"), device)
return
if admin_state_up:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
LOG.info(_LI("No device with MAC %s defined on agent."), device)
def treat_devices_added_updated(self, devices_info):
try:
macs_list = set([device_info[0] for device_info in devices_info])
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, macs_list, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for devices "
"with MAC addresses %(devices)s: %(e)s",
{'devices': macs_list, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port with MAC address %s is added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
port_id = device_details['port_id']
self.mac_to_port_id_mapping[device] = port_id
profile = device_details['profile']
spoofcheck = device_details.get('port_security_enabled', True)
self.treat_device(device,
profile.get('pci_slot'),
device_details['admin_state_up'],
spoofcheck)
self.ext_manager.handle_port(self.context, device_details)
else:
LOG.info(_LI("Device with MAC %s not defined on plugin"),
device)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
mac, pci_slot = device
LOG.info(_LI("Removing device with MAC address %(mac)s and "
"PCI slot %(pci_slot)s"),
{'mac': mac, 'pci_slot': pci_slot})
try:
port_id = self.mac_to_port_id_mapping.get(mac)
if port_id:
profile = {'pci_slot': pci_slot}
port = {'port_id': port_id,
'device': mac,
'profile': profile}
self.ext_manager.delete_port(self.context, port)
del self.mac_to_port_id_mapping[mac]
else:
LOG.warning(_LW("port_id to device with MAC "
"%s not found"), mac)
dev_details = self.plugin_rpc.update_device_down(self.context,
mac,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("Removing port failed for device with MAC address "
"%(mac)s and PCI slot %(pci_slot)s due to %(exc)s",
{'mac': mac, 'pci_slot': pci_slot, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info(_LI("Port with MAC %(mac)s and PCI slot "
"%(pci_slot)s updated."),
{'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("Device with MAC %(mac)s and PCI slot "
"%(pci_slot)s not defined on plugin",
{'mac': mac, 'pci_slot': pci_slot})
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
while True:
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
devices.clear()
sync = False
device_info = {}
# Save a copy of the updated_devices set to perform a rollback in
# case a resync is needed, and then clear self.updated_devices.
# As the greenthread should not yield between these
# two statements, this should be thread-safe.
updated_devices_copy = self.updated_devices
self.updated_devices = set()
try:
device_info = self.scan_devices(devices, updated_devices_copy)
if self._device_info_has_changes(device_info):
LOG.debug("Agent loop found changes! %s", device_info)
# If treat devices fails - indicates must resync with
# plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# Restore devices that were removed from this set earlier
# without overwriting ones that may have arrived since.
self.updated_devices |= updated_devices_copy
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
def __init__(self):
self.device_mappings = {}
self.exclude_devices = {}
def parse(self):
"""Parses device_mappings and exclude_devices.
Parse and validate the consistency of both mappings.
"""
self.device_mappings = n_utils.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self._validate()
def _validate(self):
"""Validate configuration.
Validate that each network device in exclude_devices
exists in device_mappings
"""
dev_net_set = set(self.device_mappings.values())
for dev_name in self.exclude_devices.keys():
if dev_name not in dev_net_set:
raise ValueError(_("Device name %(dev_name)s is missing from "
"physical_device_mappings") % {'dev_name':
dev_name})
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
config_parser = SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
except ValueError:
LOG.exception(_LE("Failed on Agent configuration parse. "
"Agent terminated!"))
raise SystemExit(1)
LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
polling_interval = cfg.CONF.AGENT.polling_interval
try:
agent = SriovNicSwitchAgent(device_mappings,
exclude_devices,
polling_interval)
except exc.SriovNicError:
LOG.exception(_LE("Agent Initialization Failed"))
raise SystemExit(1)
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
if __name__ == '__main__':
main()
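# Illustrative note (not part of the original module): the device options read
# by SriovNicAgentConfigParser.parse() live in the [SRIOV_NIC] config section.
# A hypothetical minimal sketch (physnet and device names are made up):
#
#   [SRIOV_NIC]
#   physical_device_mappings = physnet1:eth3
#   # exclude_devices lists per-device PCI addresses to skip; see
#   # config.parse_exclude_devices for the accepted format.
#
# [AGENT] polling_interval is read in main() and [AGENT] report_interval in
# _setup_rpc() above.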
| silenci/neutron | neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py | Python | apache-2.0 | 17,661 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features in tf.learn Estimator models.
FeatureColumns are the primary way of encoding features for pre-canned
tf.learn Estimators.
When using FeatureColumns with tf.learn models, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
(1) Feature type:
* Continuous features can be represented by `real_valued_column`.
* Categorical features can be represented by any `sparse_column_with_*`
column (`sparse_column_with_keys`, `sparse_column_with_vocabulary_file`,
`sparse_column_with_hash_bucket`, `sparse_column_with_integerized_feature`).
(2) Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = real_valued_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `one_hot_column`. `one_hot_column` is recommended for
features with only a few possible values. For features with many possible
values, `embedding_column` is recommended.
embedded_dept_column = embedding_column(
sparse_column_with_keys("department", ["math", "philosphy", ...]),
dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models.
dept_column = sparse_column_with_keys("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=[department_column, bucketized_age_column],
hash_bucket_size=1000)
Example of building tf.learn model using FeatureColumns:
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_from_feature_columns` within
`feature_column_ops.py`.
Example of building non-tf.learn model using FeatureColumns:
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import six
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class _LinearEmbeddingLookupArguments(
collections.namedtuple("_LinearEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner"])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sums.
"""
pass
class _DeepEmbeddingLookupArguments(
collections.namedtuple("_DeepEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner",
"dimension",
"shared_embedding_name",
"hash_key",
"max_norm",
"trainable"])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sums.
"""
pass
class _FeatureColumn(object):
"""Represents a feature column abstraction.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. For example "country:US" is a feature which is in "country" feature
column and has a feature value ("US").
This class is abstract. Users should not create instances of it directly.
The following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
implementations.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def name(self):
"""Returns the name of column or transformed column."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def config(self):
"""Returns configuration of the base feature for `tf.parse_example`."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
pass
@abc.abstractmethod
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
raise NotImplementedError("Transform is not implemented for {}.".format(
self))
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collection=None,
trainable=True,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network."""
raise ValueError("Calling an abstract method.")
def _deep_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to embedding lookup to build an input layer."""
raise NotImplementedError(
"No deep embedding lookup arguments for column {}.".format(self))
# It is expected that classes implement either wide_embedding_lookup_arguments
# or to_dense_tensor to be used in linear models.
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to look up embeddings for this column."""
raise NotImplementedError(
"No wide embedding lookup arguments for column {}.".format(self))
# pylint: disable=unused-argument
def _to_dense_tensor(self, input_tensor):
"""Returns a dense tensor representing this column's values."""
raise NotImplementedError(
"No dense tensor representation for column {}.".format(self))
def _checkpoint_path(self):
"""Returns None, or a (path,tensor_name) to load a checkpoint from."""
return None
def _key_without_properties(self, properties):
"""Helper method for self.key() that omits particular properties."""
fields_values = []
# pylint: disable=protected-access
for i, k in enumerate(self._fields):
if k in properties:
# Excludes a property from the key.
# For instance, exclude `initializer` from the key of EmbeddingColumn
# since we don't support users specifying different initializers for
# the same embedding column. Ditto for `normalizer` and
# RealValuedColumn.
# Special treatment is needed since the default str form of a
# function contains its address, which could introduce non-determinism
# in sorting.
continue
fields_values.append("{}={}".format(k, self[i]))
# pylint: enable=protected-access
# This is effectively the same format as str(self), except with our special
# treatment.
return "{}({})".format(type(self).__name__, ", ".join(fields_values))
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(_FeatureColumn,
collections.namedtuple("_SparseColumn",
["column_name", "is_integerized",
"bucket_size", "lookup_config",
"combiner", "dtype"])):
"""Represents a sparse feature column also known as categorical features.
Instances of this class are immutable. A sparse column means features are
sparse and dictionary returned by InputBuilder contains a
("column_name", SparseTensor) pair.
One and only one of bucket_size or lookup_config should be set. If
is_integerized is True then bucket_size should be set.
Attributes:
column_name: A string defining sparse column name.
is_integerized: A bool if True means type of feature is an integer.
Integerized means we can use the feature itself as id.
bucket_size: An int that is > 0. The number of buckets.
lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
configuration
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features, such as `tf.string` or `tf.int64`.
Raises:
TypeError: if lookup_config is not a _SparseIdLookupConfig.
ValueError: if above expectations about input fails.
"""
def __new__(cls,
column_name,
is_integerized=False,
bucket_size=None,
lookup_config=None,
combiner="sum",
dtype=dtypes.string):
if is_integerized and bucket_size is None:
raise ValueError("bucket_size must be set if is_integerized is True. "
"column_name: {}".format(column_name))
if is_integerized and not dtype.is_integer:
raise ValueError("dtype must be an integer if is_integerized is True. "
"dtype: {}, column_name: {}.".format(dtype, column_name))
if dtype != dtypes.string and not dtype.is_integer:
raise ValueError("dtype must be string or integer. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if bucket_size is None and lookup_config is None:
raise ValueError("one of bucket_size or lookup_config must be set. "
"column_name: {}".format(column_name))
if bucket_size is not None and lookup_config:
raise ValueError("one and only one of bucket_size or lookup_config "
"must be set. column_name: {}".format(column_name))
if bucket_size is not None and bucket_size < 1:
raise ValueError("bucket_size must be at least 1. "
"bucket_size: {}, column_name: {}".format(bucket_size,
column_name))
if ((lookup_config) and
(not isinstance(lookup_config, _SparseIdLookupConfig))):
raise TypeError(
"lookup_config must be an instance of _SparseIdLookupConfig. "
"Given one is in type {} for column_name {}".format(
type(lookup_config), column_name))
if (lookup_config and lookup_config.vocabulary_file and
lookup_config.vocab_size is None):
raise ValueError("vocab_size must be defined. "
"column_name: {}".format(column_name))
return super(_SparseColumn, cls).__new__(
cls,
column_name,
is_integerized=is_integerized,
bucket_size=bucket_size,
lookup_config=lookup_config,
combiner=combiner,
dtype=dtype)
@property
def name(self):
return self.column_name
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
if self.bucket_size is not None:
return self.bucket_size
return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"SparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _get_input_sparse_tensor(self, columns_to_tensors):
"""Looks up the input tensor for transformation and sparsify it if dense."""
input_tensor = columns_to_tensors[self.name]
if not isinstance(input_tensor, sparse_tensor_py.SparseTensor):
# To avoid making any assumptions about which values are to be ignored,
# we set ignore_value to -1 for numeric tensors to avoid excluding valid
# indices.
if input_tensor.dtype == dtypes.string:
ignore_value = ""
else:
ignore_value = -1
input_tensor = _reshape_real_valued_tensor(input_tensor, 2, self.name)
input_tensor = contrib_sparse_ops.dense_to_sparse_tensor(
input_tensor, ignore_value=ignore_value)
return input_tensor
def is_compatible(self, other_column):
"""Check compatability of two sparse columns."""
if self.lookup_config and other_column.lookup_config:
return self.lookup_config == other_column.lookup_config
compatible = (self.length == other_column.length and
(self.dtype == other_column.dtype or
(self.dtype.is_integer and other_column.dtype.is_integer)))
if compatible:
logging.warn("Column {} and {} may not have the same vocabulary.".
format(self.name, other_column.name))
return compatible
class _SparseColumnIntegerized(_SparseColumn):
"""See `sparse_column_with_integerized_feature`."""
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
input_tensor = self._get_input_sparse_tensor(columns_to_tensors)
sparse_id_values = math_ops.mod(input_tensor.values, self.bucket_size,
name="mod")
columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def sparse_column_with_integerized_feature(column_name,
bucket_size,
combiner="sum",
dtype=dtypes.int64):
"""Creates an integerized _SparseColumn.
Use this when your features are already pre-integerized into int64 IDs, that
is, when the set of values to output is already coming in as what's desired in
the output. Integerized means we can use the feature value itself as id.
Typically this is used for reading contiguous ranges of integer indexes, but
it doesn't have to be. The output value is simply copied from the
input_feature, whatever it is. Just be aware, however, that if your ids have
large gaps of unused integers, representations built from them can be
affected (for instance, if you make up a one-hot tensor from these ids, the
unused integers will appear as entries that are always zero).
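Example (an illustrative sketch; the column name and bucket size are made up):
```python
# "video_id" already arrives as int64 ids in the range [0, 100000).
video_id = sparse_column_with_integerized_feature("video_id",
bucket_size=100000)
```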
Args:
column_name: A string defining sparse column name.
bucket_size: An int that is > 1. The number of buckets. It should be bigger
than the maximum feature value. In other words, features in this column
should be int64 values in the range [0, bucket_size).
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. It should be an integer type. Default value is
dtypes.int64.
Returns:
An integerized _SparseColumn definition.
Raises:
ValueError: bucket_size is not greater than 1.
ValueError: dtype is not integer.
"""
return _SparseColumnIntegerized(
column_name, is_integerized=True, bucket_size=bucket_size,
combiner=combiner, dtype=dtype)
class _SparseColumnHashed(_SparseColumn):
"""See `sparse_column_with_hash_bucket`."""
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
input_tensor = self._get_input_sparse_tensor(columns_to_tensors)
if self.dtype.is_integer:
sparse_values = string_ops.as_string(input_tensor.values)
else:
sparse_values = input_tensor.values
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.bucket_size, name="lookup")
columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def sparse_column_with_hash_bucket(column_name,
hash_bucket_size,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with hashed bucket configuration.
Use this when your sparse features are in string or integer format, but you
don't have a vocab file that maps each value to an integer ID.
output_id = Hash(input_feature_string) % bucket_size
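Example (an illustrative sketch; the column name is made up):
```python
department = sparse_column_with_hash_bucket("department",
hash_bucket_size=1000)
```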
Args:
column_name: A string defining sparse column name.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with hashed bucket configuration
Raises:
ValueError: hash_bucket_size is not greater than 2.
ValueError: dtype is neither string nor integer.
"""
return _SparseColumnHashed(
column_name,
bucket_size=hash_bucket_size,
combiner=combiner,
dtype=dtype)
class _SparseColumnKeys(_SparseColumn):
"""See `sparse_column_with_keys`."""
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
input_tensor = self._get_input_sparse_tensor(columns_to_tensors)
table = lookup.index_table_from_tensor(
mapping=tuple(self.lookup_config.keys),
default_value=self.lookup_config.default_value,
dtype=self.dtype,
name="lookup")
columns_to_tensors[self] = table.lookup(input_tensor)
def sparse_column_with_keys(
column_name, keys, default_value=-1, combiner="sum", dtype=dtypes.string):
"""Creates a _SparseColumn with keys.
Look up logic is as follows:
lookup_id = index_of_feature_in_keys if feature in keys else default_value
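Example (an illustrative sketch; the keys are made up):
```python
department = sparse_column_with_keys("department",
keys=["math", "philosophy", "english"])
```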
Args:
column_name: A string defining sparse column name.
keys: A list or tuple defining vocabulary. Must be castable to `dtype`.
default_value: The value to use for out-of-vocabulary feature values.
Default is -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. Only integer and string are supported.
Returns:
A _SparseColumnKeys with keys configuration.
"""
keys = tuple(keys)
return _SparseColumnKeys(
column_name,
lookup_config=_SparseIdLookupConfig(
keys=keys, vocab_size=len(keys), default_value=default_value),
combiner=combiner,
dtype=dtype)
class _SparseColumnVocabulary(_SparseColumn):
"""See `sparse_column_with_vocabulary_file`."""
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
st = self._get_input_sparse_tensor(columns_to_tensors)
if self.dtype.is_integer:
sparse_string_values = string_ops.as_string(st.values)
sparse_string_tensor = sparse_tensor_py.SparseTensor(st.indices,
sparse_string_values,
st.dense_shape)
else:
sparse_string_tensor = st
table = lookup.string_to_index_table_from_file(
vocabulary_file=self.lookup_config.vocabulary_file,
num_oov_buckets=self.lookup_config.num_oov_buckets,
vocab_size=self.lookup_config.vocab_size,
default_value=self.lookup_config.default_value,
name=self.name + "_lookup")
columns_to_tensors[self] = table.lookup(sparse_string_tensor)
def sparse_column_with_vocabulary_file(column_name,
vocabulary_file,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with vocabulary file configuration.
Use this when your sparse features are in string or integer format, and you
have a vocab file that maps each value to an integer ID.
output_id = LookupIdFromVocab(input_feature_string)
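Example (an illustrative sketch; the file path and sizes are hypothetical):
```python
states = sparse_column_with_vocabulary_file("state",
vocabulary_file="/path/to/states.txt",
vocab_size=50,
num_oov_buckets=5)
```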
Args:
column_name: A string defining sparse column name.
vocabulary_file: The vocabulary filename.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero, all
out-of-vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with vocabulary file configuration.
Raises:
ValueError: vocab_size is not defined.
ValueError: dtype is neither string nor integer.
"""
if vocab_size is None:
raise ValueError("vocab_size should be defined. "
"column_name: {}".format(column_name))
return _SparseColumnVocabulary(
column_name,
lookup_config=_SparseIdLookupConfig(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
vocab_size=vocab_size,
default_value=default_value),
combiner=combiner,
dtype=dtype)
class _WeightedSparseColumn(_FeatureColumn, collections.namedtuple(
"_WeightedSparseColumn",
["sparse_id_column", "weight_column_name", "dtype"])):
"""See `weighted_sparse_column`."""
def __new__(cls, sparse_id_column, weight_column_name, dtype):
return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column,
weight_column_name, dtype)
@property
def name(self):
return "{}_weighted_by_{}".format(self.sparse_id_column.name,
self.weight_column_name)
@property
def length(self):
"""Returns id size."""
return self.sparse_id_column.length
@property
def config(self):
config = _get_feature_config(self.sparse_id_column)
config.update(
{self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
return config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Inserts a tuple with the id and weight tensors."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
weight_tensor = columns_to_tensors[self.weight_column_name]
if not isinstance(weight_tensor, sparse_tensor_py.SparseTensor):
# The weight tensor can be a regular Tensor. In such case, sparsify it.
weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor)
if not self.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
columns_to_tensors[self] = tuple([
columns_to_tensors[self.sparse_id_column],
weight_tensor
])
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor[0]
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return input_tensor[1]
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"WeightedSparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.sparse_id_column.combiner)
def weighted_sparse_column(sparse_id_column,
weight_column_name,
dtype=dtypes.float32):
"""Creates a _SparseColumn by combining sparse_id_column with a weight column.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(column_name="sparse_col",
hash_bucket_size=1000)
weighted_feature = weighted_sparse_column(sparse_id_column=sparse_feature,
weight_column_name="weights_col")
```
This configuration assumes that input dictionary of model contains the
following two items:
* (key="sparse_col", value=sparse_tensor) where sparse_tensor is
a SparseTensor.
* (key="weights_col", value=weights_tensor) where weights_tensor
is a SparseTensor.
Following are assumed to be true:
* sparse_tensor.indices = weights_tensor.indices
* sparse_tensor.dense_shape = weights_tensor.dense_shape
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` functions.
weight_column_name: A string defining a sparse column name which represents
weight or value of the corresponding sparse id feature.
dtype: Type of weights, such as `tf.float32`. Only floating and integer
weights are supported.
Returns:
A _WeightedSparseColumn composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if dtype is not convertible to float.
"""
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype is not convertible to float. Given {}".format(
dtype))
return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _OneHotColumn(_FeatureColumn,
collections.namedtuple("_OneHotColumn",
["sparse_id_column"])):
"""Represents a one-hot column for use in deep networks.
Args:
sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
function.
"""
@property
def name(self):
return "{}_one_hot".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
return self.sparse_id_column.length
@property
def config(self):
"""Returns the parsing config of the origin column."""
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Used by the Transformer to prevent double transformations."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _to_dnn_input_layer(self,
transformed_input_tensor,
unused_weight_collections=None,
unused_trainable=False,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network.
Args:
transformed_input_tensor: A tensor that has undergone the transformations
in `insert_transformed_feature`. Rank should be >= `output_rank`.
unused_weight_collections: Unused. One hot encodings are not variable.
unused_trainable: Unused. One hot encodings are not trainable.
output_rank: the desired rank of the output `Tensor`.
Returns:
A multihot Tensor to be fed into the first layer of neural network.
Raises:
ValueError: When using one_hot_column with weighted_sparse_column.
This is not yet supported.
"""
# Reshape ID column to `output_rank`.
sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
# pylint: disable=protected-access
sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
weight_tensor = self.sparse_id_column.weight_tensor(
transformed_input_tensor)
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(sp_ids=sparse_id_column,
sp_values=weight_tensor,
vocab_size=self.length)
return sparse_ops.sparse_tensor_to_dense(weighted_column)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(
one_hot_id_tensor, reduction_indices=[output_rank - 1])
class _EmbeddingColumn(_FeatureColumn, collections.namedtuple(
"_EmbeddingColumn",
["sparse_id_column", "dimension", "combiner", "initializer",
"ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name",
"shared_vocab_size", "max_norm", "trainable"])):
"""Represents an embedding column.
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` or `weighted_sparse_column` functions.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
shared_embedding_name: (Optional). The common name for shared embedding.
shared_vocab_size: (Optional). The common vocab_size used for shared
embedding space.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True.
Raises:
ValueError: if `initializer` is specified and is not callable. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
def __new__(cls,
sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
shared_embedding_name=None,
shared_vocab_size=None,
max_norm=None,
trainable=True):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"Embedding of column_name: {}".format(
sparse_id_column.name))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
if initializer is None:
logging.warn("The default stddev value of initializer will change from "
"\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after "
"2017/02/25.")
stddev = 1 / math.sqrt(sparse_id_column.length)
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
dimension, combiner,
initializer, ckpt_to_load_from,
tensor_name_in_ckpt,
shared_embedding_name,
shared_vocab_size,
max_norm,
trainable)
@property
def name(self):
if self.shared_embedding_name is None:
return "{}_embedding".format(self.sparse_id_column.name)
else:
return "{}_shared_embedding".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns id size."""
if self.shared_vocab_size is None:
return self.sparse_id_column.length
else:
return self.shared_vocab_size
@property
def config(self):
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=self.sparse_id_column.id_tensor(input_tensor),
weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
vocab_size=self.length,
dimension=self.dimension,
initializer=self.initializer,
combiner=self.combiner,
shared_embedding_name=self.shared_embedding_name,
hash_key=None,
max_norm=self.max_norm,
trainable=self.trainable)
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
raise ValueError("Column {} is not supported in linear models. "
"Please use sparse_column.".format(self))
def one_hot_column(sparse_id_column):
"""Creates an `_OneHotColumn` for a one-hot or multi-hot repr in a DNN.
Args:
sparse_id_column: A _SparseColumn which is created by
`sparse_column_with_*`
or crossed_column functions. Note that `combiner` defined in
`sparse_id_column` is ignored.
Returns:
An _OneHotColumn.
"""
return _OneHotColumn(sparse_id_column)
def embedding_column(sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.
Args:
sparse_id_column: A `_SparseColumn` which is created by for example
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in `sparse_id_column` is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
An `_EmbeddingColumn`.
"""
return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
max_norm=max_norm, trainable=trainable)
def shared_embedding_columns(sparse_id_columns,
dimension,
combiner="mean",
shared_embedding_name=None,
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates a list of `_EmbeddingColumn` sharing the same embedding.
Args:
sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in each sparse_id_column is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
shared_embedding_name: (Optional). A string specifying the name of shared
embedding weights. This will be needed if you want to reference the shared
embedding separately from the generated `_EmbeddingColumn`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_columns[0].length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
A tuple of `_EmbeddingColumn` with shared embedding space.
Raises:
ValueError: if sparse_id_columns is empty, or its elements are not
compatible with each other.
TypeError: if `sparse_id_columns` is not a sequence or is a string. If at
least one element of `sparse_id_columns` is not a `SparseTensor`.
"""
if (not isinstance(sparse_id_columns, collections.Sequence) or
isinstance(sparse_id_columns, six.string_types)):
raise TypeError(
"sparse_id_columns must be a non-string sequence (ex: list or tuple) "
"instead of type {}.".format(type(sparse_id_columns)))
if len(sparse_id_columns) < 1:
raise ValueError("The input sparse_id_columns should have at least one "
"element.")
for sparse_id_column in sparse_id_columns:
if not isinstance(sparse_id_column, _SparseColumn):
raise TypeError("Elements of sparse_id_columns must be _SparseColumn, but"
"{} is not.".format(sparse_id_column))
if len(sparse_id_columns) == 1:
return [
_EmbeddingColumn(sparse_id_columns[0], dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, max_norm=max_norm,
trainable=trainable)]
else:
# check compatibility of sparse_id_columns
compatible = True
for column in sparse_id_columns[1:]:
compatible = compatible and column.is_compatible(sparse_id_columns[0])
if not compatible:
raise ValueError("The input sparse id columns are not compatible.")
# Construct the shared name and size for shared embedding space.
if not shared_embedding_name:
# Sort the columns so that shared_embedding_name will be deterministic
# even if users pass in unsorted columns from a dict or something.
sorted_columns = sorted(sparse_id_columns)
if len(sorted_columns) <= 3:
shared_embedding_name = "_".join([column.name
for column in sorted_columns])
else:
shared_embedding_name = "_".join([column.name
for column in sorted_columns[0:3]])
shared_embedding_name += (
"_plus_{}_others".format(len(sorted_columns) - 3))
shared_embedding_name += "_shared_embedding"
shared_vocab_size = sparse_id_columns[0].length
embedded_columns = []
for column in sparse_id_columns:
embedded_columns.append(
_EmbeddingColumn(column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, shared_vocab_size,
max_norm=max_norm, trainable=trainable))
return tuple(embedded_columns)
class _ScatteredEmbeddingColumn(
_FeatureColumn,
collections.namedtuple(
"_ScatteredEmbeddingColumn",
["column_name", "size", "dimension", "hash_key", "combiner",
"initializer"])):
"""See `scattered_embedding_column`."""
def __new__(cls,
column_name,
size,
dimension,
hash_key,
combiner="sqrtn",
initializer=None):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"column_name: {}".format(column_name))
if initializer is None:
logging.warn("The default stddev value of initializer will change from "
"\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
stddev = 0.1
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
dimension, hash_key,
combiner,
initializer)
@property
def name(self):
return "{}_scattered_embedding".format(self.column_name)
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
columns_to_tensors[self] = columns_to_tensors[self.column_name]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.size,
initializer=self.initializer,
combiner=self.combiner,
dimension=self.dimension,
shared_embedding_name=None,
hash_key=self.hash_key,
max_norm=None,
trainable=True)
def scattered_embedding_column(column_name,
size,
dimension,
hash_key,
combiner="mean",
initializer=None):
"""Creates an embedding column of a sparse feature using parameter hashing.
This is a useful shorthand when you have a sparse feature you want to use an
embedding for, but also want to hash the embedding's values in each dimension
to a variable based on a different hash.
Specifically, the i-th embedding component of a value v is found by retrieving
an embedding weight whose index is a fingerprint of the pair (v,i).
An embedding column with sparse_column_with_hash_bucket such as
embedding_column(
sparse_column_with_hash_bucket(column_name, bucket_size),
dimension)
could be replaced by
scattered_embedding_column(
column_name,
size=bucket_size * dimension,
dimension=dimension,
hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
for the same number of embedding parameters. This should hopefully reduce the
impact of collisions, but adds the cost of slowing down training.
Args:
column_name: A string defining sparse column name.
size: An integer specifying the number of parameters in the embedding layer.
dimension: An integer specifying dimension of the embedding.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.
Returns:
A _ScatteredEmbeddingColumn.
Raises:
ValueError: if dimension or size is not a positive integer; or if combiner
is not supported.
"""
if (dimension < 1) or (size < 1):
raise ValueError("Dimension and size must be greater than 0. "
"dimension: {}, size: {}, column_name: {}".format(
dimension, size, column_name))
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
"combiner: {}, column_name: {}".format(combiner,
column_name))
return _ScatteredEmbeddingColumn(column_name, size, dimension, hash_key,
combiner, initializer)
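# --- Illustrative usage sketch (editor's addition, not part of the original module).
# The column name and sizes are hypothetical; any stable 64-bit integer works as the
# hash key, and real code would use the library constant named in the docstring above
# rather than the placeholder literal used here.
def _example_scattered_embedding_column():
  example_hash_key = 0xDECAFCAFFE  # hypothetical placeholder value
  return scattered_embedding_column(
      "query_tokens",
      size=1000 * 16,
      dimension=16,
      hash_key=example_hash_key)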
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
"""Reshaping logic for dense, numeric `Tensors`.
Follows the following rules:
1. If `output_rank > input_rank + 1` raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
dimension and return
3. If `output_rank == input_rank`, return `input_tensor`.
4. If `output_rank < input_rank`, flatten the inner dimensions of
`input_tensor` and return a `Tensor` with `output_rank`
Args:
input_tensor: a dense `Tensor` to be reshaped.
output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
input_rank = input_tensor.get_shape().ndims
if input_rank is not None:
if output_rank > input_rank + 1:
error_string = ("Rank of input Tensor ({}) should be the same as "
"output_rank ({}). For example, sequence data should "
"typically be 3 dimensional (rank 3) while non-sequence "
"data is typically 2 dimensional (rank 2).".format(
input_rank, output_rank))
if column_name is not None:
        error_string = ("Error while processing column {}. ".format(column_name)
                        + error_string)
raise ValueError(error_string)
if output_rank == input_rank + 1:
logging.warning(
"Rank of input Tensor ({}) should be the same as output_rank ({}) "
"for column. Will attempt to expand dims. It is highly recommended "
"that you resize your input, as this behavior may change.".format(
input_rank, output_rank))
return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
if output_rank == input_rank:
return input_tensor
# Here, either `input_rank` is unknown or it is greater than `output_rank`.
return layers._inner_flatten(input_tensor, output_rank) # pylint: disable=protected-access
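# --- Illustrative sketch (editor's addition): the reshaping rules documented above
# applied to a rank-2 input. The shapes below are hypothetical.
def _example_reshape_real_valued_tensor():
  batch = array_ops.zeros([4, 3], dtype=dtypes.float32)  # rank 2
  expanded = _reshape_real_valued_tensor(batch, 3)        # rank 3: [4, 3, 1]
  unchanged = _reshape_real_valued_tensor(batch, 2)       # rank 2: [4, 3]
  return expanded, unchanged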
class _RealValuedColumn(_FeatureColumn, collections.namedtuple(
"_RealValuedColumn",
["column_name", "dimension", "default_value", "dtype", "normalizer"])):
"""Represents a real valued feature column also known as continuous features.
Instances of this class are immutable. A real valued column with a specified
dimension means features are dense, otherwise they're sparse.
In the dense case, the dictionary returned by InputBuilder contains a
("column_name", Tensor) pair with a Tensor shape of (batch_size, dimension).
  In the sparse case, the dictionary contains a ("column_name", SparseTensor)
  pair instead, with the shape inferred after parsing.
"""
def __new__(cls, column_name, dimension, default_value,
dtype, normalizer):
if default_value is not None:
default_value = tuple(default_value)
return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
default_value, dtype,
normalizer)
@property
def name(self):
return self.column_name
@property
def config(self):
if self.dimension is None:
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
else:
default_value = self.default_value
if default_value is not None:
default_value = list(default_value)
return {self.column_name: parsing_ops.FixedLenFeature([self.dimension],
self.dtype,
default_value)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
return (self.normalizer(input_tensor) if self.normalizer is not None else
input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
input_tensor = self._to_dense_tensor(input_tensor)
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
default_value = (self.default_value[0] if self.default_value is not None
else 0)
return sparse_ops.sparse_tensor_to_dense(
input_tensor, default_value=default_value)
return input_tensor
def real_valued_column(column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None):
"""Creates a `_RealValuedColumn` for dense numeric data.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column.
The default is 1. When dimension is not None, the Tensor representing
the _RealValuedColumn will have the shape of [batch_size, dimension].
      A None dimension means the feature column should be treated as variable
length and will be parsed as a `SparseTensor`.
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. When dimension is not None, a default value of None
will cause tf.parse_example to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every dimension. If a list of values is provided,
the length of the list should be equal to the value of `dimension`.
Only scalar default value is supported in case dimension is not specified.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
variable length columns, the normalizer should expect an input_tensor of
type `SparseTensor`.
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
TypeError: if default_value is a list but its length is not equal to the
value of `dimension`.
TypeError: if default_value is not compatible with dtype.
ValueError: if dtype is not convertible to tf.float32.
"""
if dimension is not None:
if not isinstance(dimension, int):
raise TypeError("dimension must be an integer. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if dimension < 1:
raise ValueError("dimension must be greater than 0. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, int):
if dtype.is_integer:
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if dtype.is_floating:
default_value = float(default_value)
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, float):
if dtype.is_floating and (not dtype.is_integer):
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, list):
if dimension is None:
raise ValueError(
"Only scalar default value is supported when dimension is None. "
"default_value: {}, column_name: {}".format(
default_value, column_name))
if len(default_value) != dimension:
raise ValueError(
"The length of default_value must be equal to dimension. "
"default_value: {}, dimension: {}, column_name: {}".format(
default_value, dimension, column_name))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = True
is_list_all_float = True
for v in default_value:
if not isinstance(v, int):
is_list_all_int = False
if not (isinstance(v, float) or isinstance(v, int)):
is_list_all_float = False
if is_list_all_int:
if dtype.is_integer:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
elif dtype.is_floating:
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if is_list_all_float:
if dtype.is_floating and (not dtype.is_integer):
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
raise TypeError("default_value must be compatible with dtype. "
"default_value: {}, dtype: {}, column_name: {}".format(
default_value, dtype, column_name))
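# --- Illustrative sketch (editor's addition): the main flavours documented above.
# Column names, dimensions and defaults are hypothetical.
def _example_real_valued_columns():
  price = real_valued_column("price")                    # dense, dimension 1
  coords = real_valued_column("coords", dimension=2,
                              default_value=[0.0, 0.0])  # per-dimension default
  counts = real_valued_column("counts", dimension=None,
                              dtype=dtypes.int64)        # variable length -> SparseTensor
  return price, coords, counts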
class _BucketizedColumn(_FeatureColumn, collections.namedtuple(
"_BucketizedColumn", ["source_column", "boundaries"])):
"""Represents a bucketization transformation also known as binning.
Instances of this class are immutable. Values in `source_column` will be
bucketized based on `boundaries`.
For example, if the inputs are:
boundaries = [0, 10, 100]
source_column = [[-5], [150], [10], [0], [4], [19]]
then the bucketized feature will be:
output = [[0], [3], [2], [1], [1], [2]]
Attributes:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
      be sorted. [a, b, c] defines the following buckets: (-inf, a), [a, b),
      [b, c), [c, +inf)
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
def __new__(cls, source_column, boundaries):
if not isinstance(source_column, _RealValuedColumn):
raise TypeError("source_column must be an instance of _RealValuedColumn. "
"source_column: {}".format(source_column))
if source_column.dimension is None:
raise ValueError("source_column must have a defined dimension. "
"source_column: {}".format(source_column))
if (not isinstance(boundaries, list) and
not isinstance(boundaries, tuple)) or not boundaries:
raise ValueError("boundaries must be a non-empty list or tuple. "
"boundaries: {}".format(boundaries))
# We allow bucket boundaries to be monotonically increasing
# (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
# de-duplicate.
sanitized_boundaries = []
for i in range(len(boundaries) - 1):
if boundaries[i] == boundaries[i + 1]:
continue
elif boundaries[i] < boundaries[i + 1]:
sanitized_boundaries.append(boundaries[i])
else:
raise ValueError("boundaries must be a sorted list. "
"boundaries: {}".format(boundaries))
sanitized_boundaries.append(boundaries[len(boundaries) - 1])
return super(_BucketizedColumn, cls).__new__(cls, source_column,
tuple(sanitized_boundaries))
@property
def name(self):
return "{}_bucketized".format(self.source_column.name)
@property
def length(self):
"""Returns total number of buckets."""
return len(self.boundaries) + 1
@property
def config(self):
return self.source_column.config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
# Bucketize the source column.
if self.source_column not in columns_to_tensors:
self.source_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = bucketization_op.bucketize(
columns_to_tensors[self.source_column],
boundaries=list(self.boundaries),
name="bucketize")
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if output_rank != 2:
raise ValueError("BucketizedColumn currently only supports output_rank=2")
return array_ops.reshape(
array_ops.one_hot(
math_ops.to_int64(input_tensor),
self.length,
1.,
0.,
name="one_hot"), [-1, self.length * self.source_column.dimension],
name="reshape")
def to_sparse_tensor(self, input_tensor):
"""Creates a SparseTensor from the bucketized Tensor."""
dimension = self.source_column.dimension
batch_size = array_ops.shape(input_tensor, name="shape")[0]
if dimension > 1:
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(
math_ops.range(0, batch_size), 1, name="expand_dims"),
[1, dimension],
name="tile"), [-1],
name="reshape")
i2 = array_ops.tile(
math_ops.range(0, dimension), [batch_size], name="tile")
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = array_ops.reshape(
input_tensor, [-1], name="reshape") + self.length * i2
else:
# Simpler indices when dimension=1
i1 = math_ops.range(0, batch_size)
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
sparse_id_values = sparse_tensor_py.SparseTensor(
indices, bucket_indices, shape)
return sparse_id_values
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.to_sparse_tensor(input_tensor),
weight_tensor=None,
vocab_size=self.length * self.source_column.dimension,
initializer=init_ops.zeros_initializer(),
combiner="sum")
def bucketized_column(source_column, boundaries):
"""Creates a _BucketizedColumn for discretizing dense input.
Args:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
be sorted.
Returns:
A _BucketizedColumn.
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
return _BucketizedColumn(source_column, boundaries)
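# --- Illustrative sketch (editor's addition), mirroring the _BucketizedColumn
# docstring above: values below 0 fall in bucket 0, [0, 10) in bucket 1,
# [10, 100) in bucket 2, and values >= 100 in bucket 3. The column name is
# hypothetical.
def _example_bucketized_column():
  age = real_valued_column("age")
  return bucketized_column(age, boundaries=[0, 10, 100])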
class _CrossedColumn(_FeatureColumn,
                     collections.namedtuple("_CrossedColumn",
                                            ["columns", "hash_bucket_size",
                                             "hash_key",
                                             "combiner", "ckpt_to_load_from",
                                             "tensor_name_in_ckpt"])):
  """Represents a cross transformation, also known as conjunction or combination.
Instances of this class are immutable. It crosses given `columns`. Crossed
column output will be hashed to hash_bucket_size.
  Conceptually, the transformation can be thought of as:
Hash(cartesian product of features in columns) % `hash_bucket_size`
For example, if the columns are
SparseTensor referred by first column: shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
    SparseTensor referred by second column: shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
then crossed feature will look like:
shape = [2, 2]
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
Attributes:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Raises:
TypeError: if all items in columns are not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn.
ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
@staticmethod
def _assert_is_crossable(column):
if isinstance(column, (_SparseColumn, _CrossedColumn, _BucketizedColumn)):
return
raise TypeError("columns must be a set of _SparseColumn, "
"_CrossedColumn, or _BucketizedColumn instances. "
"(column {} is a {})".format(column,
column.__class__.__name__))
def __new__(cls,
columns,
hash_bucket_size,
hash_key,
combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
for column in columns:
_CrossedColumn._assert_is_crossable(column)
if len(columns) < 2:
raise ValueError("columns must contain at least 2 elements. "
"columns: {}".format(columns))
if hash_bucket_size < 2:
raise ValueError("hash_bucket_size must be at least 2. "
"hash_bucket_size: {}".format(hash_bucket_size))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
sorted_columns = sorted(
[column for column in columns], key=lambda column: column.name)
return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
hash_bucket_size, hash_key,
combiner,
ckpt_to_load_from,
tensor_name_in_ckpt)
@property
def name(self):
sorted_names = sorted([column.name for column in self.columns])
return "_X_".join(sorted_names)
@property
def config(self):
config = {}
for column in self.columns:
config.update(_get_feature_config(column))
return config
@property
def length(self):
"""Returns total number of buckets."""
return self.hash_bucket_size
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
def insert_transformed_feature(self, columns_to_tensors):
"""Handles cross transformation."""
def _collect_leaf_level_columns(cross):
"""Collects base columns contained in the cross."""
leaf_level_columns = []
for c in cross.columns:
if isinstance(c, _CrossedColumn):
leaf_level_columns.extend(_collect_leaf_level_columns(c))
else:
leaf_level_columns.append(c)
return leaf_level_columns
feature_tensors = []
for c in _collect_leaf_level_columns(self):
if isinstance(c, _SparseColumn):
feature_tensors.append(columns_to_tensors[c.name])
else:
if c not in columns_to_tensors:
c.insert_transformed_feature(columns_to_tensors)
if isinstance(c, _BucketizedColumn):
feature_tensors.append(c.to_sparse_tensor(columns_to_tensors[c]))
else:
feature_tensors.append(columns_to_tensors[c])
columns_to_tensors[self] = sparse_feature_cross_op.sparse_feature_cross(
feature_tensors,
hashed_output=True,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key,
name="cross")
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError("CrossedColumn is not supported in DNN. "
"Please use embedding_column. column: {}".format(self))
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def crossed_column(columns, hash_bucket_size, combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
hash_key=None):
"""Creates a _CrossedColumn for performing feature crosses.
Args:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A _CrossedColumn.
Raises:
TypeError: if any item in columns is not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn, or
hash_bucket_size is not an int.
ValueError: if hash_bucket_size is not > 1 or
len(columns) is not > 1.
"""
return _CrossedColumn(
columns,
hash_bucket_size,
hash_key,
combiner=combiner,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt)
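# --- Illustrative sketch (editor's addition): crossing a hash-bucketed sparse
# column with a bucketized dense column. sparse_column_with_hash_bucket is the
# helper referenced in the docstrings above and is assumed to be defined earlier
# in this module; the names and bucket sizes are hypothetical.
def _example_crossed_column():
  country = sparse_column_with_hash_bucket("country", hash_bucket_size=100)
  age_buckets = bucketized_column(
      real_valued_column("age"), boundaries=[18, 25, 35, 50, 65])
  return crossed_column([country, age_buckets], hash_bucket_size=10000)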
class DataFrameColumn(_FeatureColumn,
collections.namedtuple("DataFrameColumn",
["column_name", "series"])):
"""Represents a feature column produced from a `DataFrame`.
Instances of this class are immutable. A `DataFrame` column may be dense or
sparse, and may have any shape, with the constraint that dimension 0 is
batch_size.
Args:
column_name: a name for this column
series: a `Series` to be wrapped, which has already had its base features
substituted with `PredefinedSeries`.
"""
def __new__(cls, column_name, series):
return super(DataFrameColumn, cls).__new__(cls, column_name, series)
@property
def name(self):
return self.column_name
@property
def config(self):
return self.series.required_base_features()
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self.name
def insert_transformed_feature(self, columns_to_tensors):
# The cache must already contain mappings from the expected base feature
# names to Tensors.
# Passing columns_to_tensors as the cache here means that multiple outputs
# of the transform will be cached, keyed by the repr of their associated
# TransformedSeries.
# The specific requested output ends up in columns_to_tensors twice: once
# keyed by the TransformedSeries repr, and once keyed by this
# DataFrameColumn instance.
columns_to_tensors[self] = self.series.build(columns_to_tensors)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return self._to_dnn_input_layer(input_tensor)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_feature_config(feature_column):
"""Returns configuration for the base feature defined in feature_column."""
if not isinstance(feature_column, _FeatureColumn):
raise TypeError(
"feature_columns should only contain instances of _FeatureColumn. "
"Given column is {}".format(feature_column))
if isinstance(feature_column, (_SparseColumn, _WeightedSparseColumn,
_EmbeddingColumn, _RealValuedColumn,
_BucketizedColumn, _CrossedColumn,
_OneHotColumn, _ScatteredEmbeddingColumn)):
return feature_column.config
raise TypeError("Not supported _FeatureColumn type. "
"Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
"""Helper that prepares features config from input feature_columns.
The returned feature config can be used as arg 'features' in tf.parse_example.
Typical usage example:
```python
# Define features and transformations
feature_a = sparse_column_with_vocabulary_file(...)
feature_b = real_valued_column(...)
feature_c_bucketized = bucketized_column(real_valued_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=[feature_a, feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
batch_examples = tf.parse_example(
serialized=serialized_examples,
features=create_feature_spec_for_parsing(feature_columns))
```
For the above example, create_feature_spec_for_parsing would return the dict:
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn, unless
feature_columns is a dict -- in which case, this should be true of all
values in the dict.
Returns:
A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
"""
if isinstance(feature_columns, dict):
feature_columns = feature_columns.values()
features_config = {}
for column in feature_columns:
features_config.update(_get_feature_config(column))
return features_config
def _create_sequence_feature_spec_for_parsing(sequence_feature_columns,
allow_missing_by_default=False):
"""Prepares a feature spec for parsing `tf.SequenceExample`s.
Args:
sequence_feature_columns: an iterable containing all the feature columns.
All items should be instances of classes derived from `_FeatureColumn`.
allow_missing_by_default: whether to set `allow_missing=True` by default for
`FixedLenSequenceFeature`s.
Returns:
A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`.
"""
feature_spec = create_feature_spec_for_parsing(sequence_feature_columns)
sequence_feature_spec = {}
for key, feature in feature_spec.items():
if isinstance(feature, parsing_ops.VarLenFeature):
sequence_feature = feature
elif isinstance(feature, parsing_ops.FixedLenFeature):
default_is_set = feature.default_value is not None
if default_is_set:
logging.warning(
'Found default value {} for feature "{}". Ignoring this value and '
'setting `allow_missing=True` instead.'.
format(feature.default_value, key))
sequence_feature = parsing_ops.FixedLenSequenceFeature(
shape=feature.shape,
dtype=feature.dtype,
allow_missing=(allow_missing_by_default or default_is_set))
else:
raise TypeError(
"Unsupported feature type: {}".format(type(feature).__name__))
sequence_feature_spec[key] = sequence_feature
return sequence_feature_spec
def make_place_holder_tensors_for_base_features(feature_columns):
"""Returns placeholder tensors for inference.
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A dict mapping feature keys to SparseTensors (sparse columns) or
placeholder Tensors (dense columns).
"""
# Get dict mapping features to FixedLenFeature or VarLenFeature values.
dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
placeholders = {}
for column_name, column_type in dict_for_parse_example.items():
if isinstance(column_type, parsing_ops.VarLenFeature):
# Sparse placeholder for sparse tensors.
placeholders[column_name] = array_ops.sparse_placeholder(
column_type.dtype, name="Placeholder_{}".format(column_name))
else:
# Simple placeholder for dense tensors.
placeholders[column_name] = array_ops.placeholder(
column_type.dtype,
shape=(None, column_type.shape[0]),
name="Placeholder_{}".format(column_name))
return placeholders
class _SparseIdLookupConfig(
collections.namedtuple("_SparseIdLookupConfig",
["vocabulary_file", "keys", "num_oov_buckets",
"vocab_size", "default_value"])):
"""Defines lookup configuration for a sparse feature.
  An immutable object that defines the lookup table configuration used by
tf.feature_to_id_v2.
Attributes:
vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
with keys.
keys: A 1-D string iterable that specifies the mapping of strings to
      indices. This means a feature in keys will map to its index in keys.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
"""
def __new__(cls,
vocabulary_file=None,
keys=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1):
return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys,
num_oov_buckets,
vocab_size, default_value)
| taknevski/tensorflow-xsmm | tensorflow/contrib/layers/python/layers/feature_column.py | Python | apache-2.0 | 89,072 |
##
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Most s2n integration tests are run against a variety of arguments.
A "scenario" represents a specific set of inputs, such as address,
cipher, version, etc.
"""
import itertools
import multiprocessing
import os
from enum import Enum as BaseEnum
from multiprocessing.pool import ThreadPool
class Enum(BaseEnum):
def __str__(self):
return self.name
@classmethod
def all(cls):
return cls
class Version(Enum):
SSLv3 = 30
TLS10 = 31
TLS11 = 32
TLS12 = 33
TLS13 = 34
class Mode(Enum):
client = 0
server = 1
def is_client(self):
return self is Mode.client
def is_server(self):
return self is Mode.server
def other(self):
return Mode.server if self.is_client() else Mode.client
class Cipher():
def __init__(self, name, min_version):
self.name = name
self.min_version = min_version
def valid_for(self, version):
if not version:
version = Version.default()
if version.value < self.min_version.value:
return False
if self.min_version is Version.TLS13:
return version.value >= Version.TLS13.value
return True
def __str__(self):
return self.name
@classmethod
def all(cls):
return ALL_CIPHERS_PER_LIBCRYPTO_VERSION[get_libcrypto()]
def get_libcrypto():
return str(os.getenv("S2N_LIBCRYPTO")).strip('"')
ALL_CIPHERS = [
Cipher("TLS_AES_256_GCM_SHA384", Version.TLS13),
Cipher("TLS_CHACHA20_POLY1305_SHA256", Version.TLS13),
Cipher("TLS_AES_128_GCM_SHA256", Version.TLS13)
]
# Older OpenSSL and LibreSSL do not support CHACHA20
LEGACY_COMPATIBLE_CIPHERS = list(filter(lambda x: "CHACHA20" not in x.name, ALL_CIPHERS))
ALL_CIPHERS_PER_LIBCRYPTO_VERSION = {
"openssl-1.1.1" : ALL_CIPHERS,
"openssl-1.0.2" : LEGACY_COMPATIBLE_CIPHERS,
"openssl-1.0.2-fips" : LEGACY_COMPATIBLE_CIPHERS,
"libressl" : LEGACY_COMPATIBLE_CIPHERS,
}
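# Illustrative sketch (editor's addition): Cipher.valid_for admits a TLS1.3-only
# cipher exclusively when the negotiated version is at least TLS1.3. This follows
# directly from the logic above; the cipher used here is one of ALL_CIPHERS.
def _example_cipher_validity():
    tls13_cipher = Cipher("TLS_AES_128_GCM_SHA256", Version.TLS13)
    assert tls13_cipher.valid_for(Version.TLS13)
    assert not tls13_cipher.valid_for(Version.TLS12)
    return tls13_cipher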
ALL_CURVES = ["P-256", "P-384"]
class Scenario:
"""
Describes the configuration for a specific TLS connection.
"""
def __init__(self, s2n_mode, host, port, version=None, cipher=None, curve=None, s2n_flags=[], peer_flags=[]):
"""
Args:
s2n_mode: whether s2n should act as a client or server.
host: host to connect or listen to.
port: port to connect or listen to.
version: which TLS protocol version to use. If None, the implementation will
use its default.
cipher: which cipher to use. If None, the implementation will use its default.
s2n_flags: any extra flags that should be passed to s2n.
peer_flags: any extra flags that should be passed to the TLS implementation
that s2n connects to.
"""
self.s2n_mode = s2n_mode
self.host = host
self.port = port
self.version = version
self.cipher = cipher
self.curve = curve
self.s2n_flags = s2n_flags
self.peer_flags = peer_flags
def __str__(self):
version = self.version if self.version else "DEFAULT"
cipher = self.cipher if self.cipher else "ANY"
result = "Mode:%s %s Version:%s Curve:%s Cipher:%s" % \
(self.s2n_mode, " ".join(self.s2n_flags), str(version).ljust(7), self.curve, str(cipher).ljust(30))
return result.ljust(100)
def __create_thread_pool():
threadpool_size = multiprocessing.cpu_count() * 2 # Multiply by 2 since performance improves slightly if CPU has hyperthreading
threadpool = ThreadPool(processes=threadpool_size)
return threadpool
def run_scenarios(test_func, scenarios):
failed = 0
threadpool = __create_thread_pool()
results = {}
print("\tRunning scenarios: " + str(len(scenarios)))
for scenario in scenarios:
async_result = threadpool.apply_async(test_func, (scenario,))
results.update({scenario: async_result})
threadpool.close()
threadpool.join()
results.update((k, v.get()) for k,v in results.items())
# Sort the results so that failures appear at the end
sorted_results = sorted(results.items(), key=lambda x: not x[1].is_success())
for scenario, result in sorted_results:
print("%s %s" % (str(scenario), str(result).rstrip()))
if not result.is_success():
failed += 1
return failed
def get_scenarios(host, start_port, s2n_modes=Mode.all(), versions=[None], ciphers=[None], curves=ALL_CURVES, s2n_flags=[], peer_flags=[]):
port = start_port
scenarios = []
combos = itertools.product(versions, s2n_modes, ciphers, curves)
for (version, s2n_mode, cipher, curve) in combos:
if cipher and not cipher.valid_for(version):
continue
        # The s2n mode is already part of the itertools.product combination above,
        # so each combination yields exactly one scenario.
        scenarios.append(Scenario(
            s2n_mode=s2n_mode,
            host=host,
            port=port,
            version=version,
            cipher=cipher,
            curve=curve,
            s2n_flags=s2n_flags,
            peer_flags=peer_flags))
        port += 1
return scenarios
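# Illustrative sketch (editor's addition): driving the helpers above from a test.
# The result class and the test body are hypothetical; run_scenarios only requires
# that the test function return an object exposing is_success().
class _ExampleResult:
    def __init__(self, success):
        self._success = success
    def is_success(self):
        return self._success
def _example_test(scenario):
    # A real test would launch s2n against a peer using scenario.host,
    # scenario.port, scenario.cipher and so on; here we simply report success.
    return _ExampleResult(True)
def _example_main():
    scenarios = get_scenarios("localhost", 8000,
                              versions=[Version.TLS12, Version.TLS13],
                              ciphers=ALL_CIPHERS)
    return run_scenarios(_example_test, scenarios)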
| alexeblee/s2n | tests/integration/common/s2n_test_scenario.py | Python | apache-2.0 | 5,789 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import heapq, logging, os, re, socket, time, types
from proton import dispatch, generate_uuid, PN_ACCEPTED, SASL, symbol, ulong, Url
from proton import Collector, Connection, Delivery, Described, Endpoint, Event, Link, Terminus, Timeout
from proton import Message, Handler, ProtonException, Transport, TransportException, ConnectionException
from select import select
class OutgoingMessageHandler(Handler):
"""
A utility for simpler and more intuitive handling of delivery
events related to outgoing i.e. sent messages.
"""
def __init__(self, auto_settle=True, delegate=None):
self.auto_settle = auto_settle
self.delegate = delegate
def on_link_flow(self, event):
if event.link.is_sender and event.link.credit:
self.on_sendable(event)
def on_delivery(self, event):
dlv = event.delivery
if dlv.link.is_sender and dlv.updated:
if dlv.remote_state == Delivery.ACCEPTED:
self.on_accepted(event)
elif dlv.remote_state == Delivery.REJECTED:
self.on_rejected(event)
elif dlv.remote_state == Delivery.RELEASED or dlv.remote_state == Delivery.MODIFIED:
self.on_released(event)
if dlv.settled:
self.on_settled(event)
if self.auto_settle:
dlv.settle()
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
"""
if self.delegate:
dispatch(self.delegate, 'on_sendable', event)
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
"""
if self.delegate:
dispatch(self.delegate, 'on_accepted', event)
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
"""
if self.delegate:
dispatch(self.delegate, 'on_rejected', event)
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
that this may be in response to either the RELEASE or MODIFIED
state as defined by the AMQP specification.
"""
if self.delegate:
dispatch(self.delegate, 'on_released', event)
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
        message. This is the point at which it should never be
retransmitted.
"""
if self.delegate:
dispatch(self.delegate, 'on_settled', event)
def recv_msg(delivery):
msg = Message()
msg.decode(delivery.link.recv(delivery.pending))
delivery.link.advance()
return msg
class Reject(ProtonException):
"""
    An exception that indicates a message should be rejected
"""
pass
class Release(ProtonException):
"""
    An exception that indicates a message should be released
"""
pass
class Acking(object):
def accept(self, delivery):
"""
Accepts a received message.
"""
self.settle(delivery, Delivery.ACCEPTED)
def reject(self, delivery):
"""
Rejects a received message that is considered invalid or
unprocessable.
"""
self.settle(delivery, Delivery.REJECTED)
def release(self, delivery, delivered=True):
"""
Releases a received message, making it available at the source
for any (other) interested receiver. The ``delivered``
parameter indicates whether this should be considered a
delivery attempt (and the delivery count updated) or not.
"""
if delivered:
self.settle(delivery, Delivery.MODIFIED)
else:
self.settle(delivery, Delivery.RELEASED)
def settle(self, delivery, state=None):
if state:
delivery.update(state)
delivery.settle()
class IncomingMessageHandler(Handler, Acking):
"""
A utility for simpler and more intuitive handling of delivery
events related to incoming i.e. received messages.
"""
def __init__(self, auto_accept=True, delegate=None):
self.delegate = delegate
self.auto_accept = auto_accept
def on_delivery(self, event):
dlv = event.delivery
if not dlv.link.is_receiver: return
if dlv.readable and not dlv.partial:
event.message = recv_msg(dlv)
if event.link.state & Endpoint.LOCAL_CLOSED:
if self.auto_accept:
dlv.update(Delivery.RELEASED)
dlv.settle()
else:
try:
self.on_message(event)
if self.auto_accept:
dlv.update(Delivery.ACCEPTED)
dlv.settle()
except Reject:
dlv.update(Delivery.REJECTED)
dlv.settle()
except Release:
dlv.update(Delivery.MODIFIED)
dlv.settle()
elif dlv.updated and dlv.settled:
self.on_settled(event)
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
        referring to this message in further actions (e.g. when
        explicitly accepting it), the ``delivery`` should be used; it is
        also obtainable via a property on the event.
"""
if self.delegate:
dispatch(self.delegate, 'on_message', event)
def on_settled(self, event):
if self.delegate:
dispatch(self.delegate, 'on_settled', event)
class EndpointStateHandler(Handler):
"""
A utility that exposes 'endpoint' events i.e. the open/close for
links, sessions and connections in a more intuitive manner. A
XXX_opened method will be called when both local and remote peers
have opened the link, session or connection. This can be used to
confirm a locally initiated action for example. A XXX_opening
method will be called when the remote peer has requested an open
that was not initiated locally. By default this will simply open
locally, which then triggers the XXX_opened call. The same applies
to close.
"""
def __init__(self, peer_close_is_error=False, delegate=None):
self.delegate = delegate
self.peer_close_is_error = peer_close_is_error
@classmethod
def is_local_open(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_ACTIVE
@classmethod
def is_local_uninitialised(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_UNINIT
@classmethod
def is_local_closed(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_CLOSED
@classmethod
def is_remote_open(cls, endpoint):
return endpoint.state & Endpoint.REMOTE_ACTIVE
@classmethod
def is_remote_closed(cls, endpoint):
return endpoint.state & Endpoint.REMOTE_CLOSED
@classmethod
def print_error(cls, endpoint, endpoint_type):
if endpoint.remote_condition:
logging.error(endpoint.remote_condition.description)
elif cls.is_local_open(endpoint) and cls.is_remote_closed(endpoint):
logging.error("%s closed by peer" % endpoint_type)
def on_link_remote_close(self, event):
if event.link.remote_condition:
self.on_link_error(event)
elif self.is_local_closed(event.link):
self.on_link_closed(event)
else:
self.on_link_closing(event)
event.link.close()
def on_session_remote_close(self, event):
if event.session.remote_condition:
self.on_session_error(event)
elif self.is_local_closed(event.session):
self.on_session_closed(event)
else:
self.on_session_closing(event)
event.session.close()
def on_connection_remote_close(self, event):
if event.connection.remote_condition:
self.on_connection_error(event)
elif self.is_local_closed(event.connection):
self.on_connection_closed(event)
else:
self.on_connection_closing(event)
event.connection.close()
def on_connection_local_open(self, event):
if self.is_remote_open(event.connection):
self.on_connection_opened(event)
def on_connection_remote_open(self, event):
if self.is_local_open(event.connection):
self.on_connection_opened(event)
elif self.is_local_uninitialised(event.connection):
self.on_connection_opening(event)
event.connection.open()
def on_session_local_open(self, event):
if self.is_remote_open(event.session):
self.on_session_opened(event)
def on_session_remote_open(self, event):
if self.is_local_open(event.session):
self.on_session_opened(event)
elif self.is_local_uninitialised(event.session):
self.on_session_opening(event)
event.session.open()
def on_link_local_open(self, event):
if self.is_remote_open(event.link):
self.on_link_opened(event)
def on_link_remote_open(self, event):
if self.is_local_open(event.link):
self.on_link_opened(event)
elif self.is_local_uninitialised(event.link):
self.on_link_opening(event)
event.link.open()
def on_connection_opened(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_opened', event)
def on_session_opened(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_opened', event)
def on_link_opened(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_opened', event)
def on_connection_opening(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_opening', event)
def on_session_opening(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_opening', event)
def on_link_opening(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_opening', event)
def on_connection_error(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_error', event)
else:
            self.print_error(event.connection, "connection")
def on_session_error(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_error', event)
else:
            self.print_error(event.session, "session")
event.connection.close()
def on_link_error(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_error', event)
else:
            self.print_error(event.link, "link")
event.connection.close()
def on_connection_closed(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_closed', event)
def on_session_closed(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_closed', event)
def on_link_closed(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_closed', event)
def on_connection_closing(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_closing', event)
elif self.peer_close_is_error:
self.on_connection_error(event)
def on_session_closing(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_closing', event)
elif self.peer_close_is_error:
self.on_session_error(event)
def on_link_closing(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_closing', event)
elif self.peer_close_is_error:
self.on_link_error(event)
def on_transport_tail_closed(self, event):
self.on_transport_closed(event)
def on_transport_closed(self, event):
if self.delegate and event.connection and self.is_local_open(event.connection):
dispatch(self.delegate, 'on_disconnected', event)
class MessagingHandler(Handler, Acking):
"""
A general purpose handler that makes the proton-c events somewhat
simpler to deal with and/or avoids repetitive tasks for common use
cases.
"""
def __init__(self, prefetch=10, auto_accept=True, auto_settle=True, peer_close_is_error=False):
self.handlers = []
if prefetch:
self.handlers.append(CFlowController(prefetch))
self.handlers.append(EndpointStateHandler(peer_close_is_error, self))
self.handlers.append(IncomingMessageHandler(auto_accept, self))
self.handlers.append(OutgoingMessageHandler(auto_settle, self))
def on_connection_error(self, event):
"""
Called when the peer closes the connection with an error condition.
"""
EndpointStateHandler.print_error(event.connection, "connection")
def on_session_error(self, event):
"""
Called when the peer closes the session with an error condition.
"""
EndpointStateHandler.print_error(event.session, "session")
event.connection.close()
def on_link_error(self, event):
"""
Called when the peer closes the link with an error condition.
"""
EndpointStateHandler.print_error(event.link, "link")
event.connection.close()
def on_reactor_init(self, event):
"""
Called when the event loop - the reactor - starts.
"""
if hasattr(event.reactor, 'subclass'):
setattr(event, event.reactor.subclass.__name__.lower(), event.reactor)
self.on_start(event)
def on_start(self, event):
"""
Called when the event loop starts. (Just an alias for on_reactor_init)
"""
pass
def on_connection_closed(self, event):
"""
Called when the connection is closed.
"""
pass
def on_session_closed(self, event):
"""
Called when the session is closed.
"""
pass
def on_link_closed(self, event):
"""
Called when the link is closed.
"""
pass
def on_connection_closing(self, event):
"""
Called when the peer initiates the closing of the connection.
"""
pass
def on_session_closing(self, event):
"""
Called when the peer initiates the closing of the session.
"""
pass
def on_link_closing(self, event):
"""
Called when the peer initiates the closing of the link.
"""
pass
def on_disconnected(self, event):
"""
Called when the socket is disconnected.
"""
pass
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
"""
pass
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
"""
pass
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
"""
pass
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
that this may be in response to either the RELEASE or MODIFIED
state as defined by the AMQP specification.
"""
pass
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
        message. This is the point at which it should never be
retransmitted.
"""
pass
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
        referring to this message in further actions (e.g. when
        explicitly accepting it), the ``delivery`` should be used; it is
        also obtainable via a property on the event.
"""
pass
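# Illustrative sketch (editor's addition): a minimal application handler built on
# MessagingHandler. The payload handling is hypothetical; wiring the handler into
# a reactor or container is left to the application.
class _ExampleReceiveHandler(MessagingHandler):
    def __init__(self):
        super(_ExampleReceiveHandler, self).__init__(auto_accept=False)
        self.received = []
    def on_message(self, event):
        # Record the body, then accept explicitly via the Acking mixin since
        # auto_accept is disabled.
        self.received.append(event.message.body)
        self.accept(event.delivery)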
class TransactionHandler(object):
"""
The interface for transaction handlers, i.e. objects that want to
be notified of state changes related to a transaction.
"""
def on_transaction_declared(self, event):
pass
def on_transaction_committed(self, event):
pass
def on_transaction_aborted(self, event):
pass
def on_transaction_declare_failed(self, event):
pass
def on_transaction_commit_failed(self, event):
pass
class TransactionalClientHandler(MessagingHandler, TransactionHandler):
"""
An extension to the MessagingHandler for applications using
transactions.
"""
def __init__(self, prefetch=10, auto_accept=False, auto_settle=True, peer_close_is_error=False):
super(TransactionalClientHandler, self).__init__(prefetch, auto_accept, auto_settle, peer_close_is_error)
def accept(self, delivery, transaction=None):
if transaction:
transaction.accept(delivery)
else:
super(TransactionalClientHandler, self).accept(delivery)
from proton import WrappedHandler
from cproton import pn_flowcontroller, pn_handshaker, pn_iohandler
class CFlowController(WrappedHandler):
def __init__(self, window=1024):
WrappedHandler.__init__(self, lambda: pn_flowcontroller(window))
class CHandshaker(WrappedHandler):
def __init__(self):
WrappedHandler.__init__(self, pn_handshaker)
class IOHandler(WrappedHandler):
def __init__(self):
WrappedHandler.__init__(self, pn_iohandler)
class PythonIO:
def __init__(self):
self.selectables = []
self.delegate = IOHandler()
def on_unhandled(self, method, event):
event.dispatch(self.delegate)
def on_selectable_init(self, event):
self.selectables.append(event.context)
def on_selectable_updated(self, event):
pass
def on_selectable_final(self, event):
sel = event.context
if sel.is_terminal:
self.selectables.remove(sel)
sel.release()
def on_reactor_quiesced(self, event):
reactor = event.reactor
# check if we are still quiesced, other handlers of
# on_reactor_quiesced could have produced events to process
if not reactor.quiesced: return
reading = []
writing = []
deadline = None
for sel in self.selectables:
if sel.reading:
reading.append(sel)
if sel.writing:
writing.append(sel)
if sel.deadline:
if deadline is None:
deadline = sel.deadline
else:
deadline = min(sel.deadline, deadline)
if deadline is not None:
timeout = deadline - time.time()
else:
timeout = reactor.timeout
if (timeout < 0): timeout = 0
timeout = min(timeout, reactor.timeout)
readable, writable, _ = select(reading, writing, [], timeout)
reactor.mark()
now = time.time()
for s in readable:
s.readable()
for s in writable:
s.writable()
for s in self.selectables:
if s.deadline and now > s.deadline:
s.expired()
reactor.yield_()
| wprice/qpid-proton | proton-c/bindings/python/proton/handlers.py | Python | apache-2.0 | 20,366 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark_cassandra.util import as_java_object, as_java_array
from pyspark.streaming.dstream import DStream
from pyspark_cassandra.conf import WriteConf
from pyspark_cassandra.util import helper
from pyspark.serializers import AutoBatchedSerializer, PickleSerializer
def saveToCassandra(dstream, keyspace, table, columns=None, row_format=None, keyed=None,
write_conf=None, **write_conf_kwargs):
ctx = dstream._ssc._sc
gw = ctx._gateway
# create write config as map
write_conf = WriteConf.build(write_conf, **write_conf_kwargs)
write_conf = as_java_object(gw, write_conf.settings())
# convert the columns to a string array
columns = as_java_array(gw, "String", columns) if columns else None
return helper(ctx).saveToCassandra(dstream._jdstream, keyspace, table, columns, row_format,
keyed, write_conf)
def joinWithCassandraTable(dstream, keyspace, table, selected_columns=None, join_columns=None):
"""Joins a DStream (a stream of RDDs) with a Cassandra table
Arguments:
@param dstream(DStream)
        The DStream to join. Equal to self when invoking joinWithCassandraTable on a monkey
        patched DStream.
@param keyspace(string):
The keyspace to join on.
@param table(string):
The CQL table to join on.
@param selected_columns(string):
The columns to select from the Cassandra table.
@param join_columns(string):
The columns used to join on from the Cassandra table.
"""
ssc = dstream._ssc
ctx = ssc._sc
gw = ctx._gateway
selected_columns = as_java_array(gw, "String", selected_columns) if selected_columns else None
join_columns = as_java_array(gw, "String", join_columns) if join_columns else None
h = helper(ctx)
dstream = h.joinWithCassandraTable(dstream._jdstream, keyspace, table, selected_columns,
join_columns)
dstream = h.pickleRows(dstream)
dstream = h.javaDStream(dstream)
return DStream(dstream, ssc, AutoBatchedSerializer(PickleSerializer()))
# Monkey patch the default python DStream so that data in it can be stored to and joined with
# Cassandra tables
DStream.saveToCassandra = saveToCassandra
DStream.joinWithCassandraTable = joinWithCassandraTable
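# Illustrative sketch (editor's addition): once the monkey patching above has run,
# any DStream exposes these methods. The keyspace, table and column names are
# hypothetical, and `dstream` is assumed to contain rows matching the table schema.
def _example_usage(dstream):
    dstream.saveToCassandra("example_keyspace", "example_table",
                            columns=["key", "value"])
    return dstream.joinWithCassandraTable("example_keyspace", "example_table",
                                          selected_columns=["value"],
                                          join_columns=["key"])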
| TargetHolding/pyspark-cassandra | python/pyspark_cassandra/streaming.py | Python | apache-2.0 | 2,902 |
# Copyright 2018 Rackspace, US Inc.
# Copyright 2019 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import socketserver
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from octavia.api.drivers.driver_agent import driver_get
from octavia.api.drivers.driver_agent import driver_updater
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _recv(recv_socket):
size_str = b''
char = recv_socket.recv(1)
while char != b'\n':
size_str += char
char = recv_socket.recv(1)
payload_size = int(size_str)
mv_buffer = memoryview(bytearray(payload_size))
next_offset = 0
while payload_size - next_offset > 0:
recv_size = recv_socket.recv_into(mv_buffer[next_offset:],
payload_size - next_offset)
next_offset += recv_size
return jsonutils.loads(mv_buffer.tobytes())
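# Illustrative sketch (editor's addition): the sending side of the framing that
# _recv() expects -- an ASCII payload length terminated by a newline, followed by
# the JSON payload itself. The socket is assumed to be a connected AF_UNIX stream
# socket; this helper is not part of the original module.
def _example_send(send_socket, payload):
    json_data = jsonutils.dump_as_bytes(payload)
    send_socket.send('{}\n'.format(len(json_data)).encode('utf-8'))
    send_socket.sendall(json_data)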
class StatusRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Get the update data
status = _recv(self.request)
# Process the update
updater = driver_updater.DriverUpdater()
response = updater.update_loadbalancer_status(status)
# Send the response
json_data = jsonutils.dump_as_bytes(response)
len_str = '{}\n'.format(len(json_data)).encode('utf-8')
self.request.send(len_str)
self.request.sendall(json_data)
class StatsRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Get the update data
stats = _recv(self.request)
# Process the update
updater = driver_updater.DriverUpdater()
response = updater.update_listener_statistics(stats)
# Send the response
json_data = jsonutils.dump_as_bytes(response)
len_str = '{}\n'.format(len(json_data)).encode('utf-8')
self.request.send(len_str)
self.request.sendall(json_data)
class GetRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Get the data request
get_data = _recv(self.request)
# Process the get
response = driver_get.process_get(get_data)
# Send the response
json_data = jsonutils.dump_as_bytes(response)
len_str = '{}\n'.format(len(json_data)).encode('utf-8')
self.request.send(len_str)
self.request.sendall(json_data)
class ForkingUDSServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
def _cleanup_socket_file(filename):
# Remove the socket file if it already exists
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def status_listener(exit_event):
_cleanup_socket_file(CONF.driver_agent.status_socket_path)
server = ForkingUDSServer(CONF.driver_agent.status_socket_path,
StatusRequestHandler)
server.timeout = CONF.driver_agent.status_request_timeout
server.max_children = CONF.driver_agent.status_max_processes
while not exit_event.is_set():
server.handle_request()
LOG.info('Waiting for driver status listener to shutdown...')
# Can't shut ourselves down as we would deadlock, spawn a thread
threading.Thread(target=server.shutdown).start()
LOG.info('Driver status listener shutdown finished.')
server.server_close()
_cleanup_socket_file(CONF.driver_agent.status_socket_path)
def stats_listener(exit_event):
_cleanup_socket_file(CONF.driver_agent.stats_socket_path)
server = ForkingUDSServer(CONF.driver_agent.stats_socket_path,
StatsRequestHandler)
server.timeout = CONF.driver_agent.stats_request_timeout
server.max_children = CONF.driver_agent.stats_max_processes
while not exit_event.is_set():
server.handle_request()
LOG.info('Waiting for driver statistics listener to shutdown...')
# Can't shut ourselves down as we would deadlock, spawn a thread
threading.Thread(target=server.shutdown).start()
LOG.info('Driver statistics listener shutdown finished.')
server.server_close()
_cleanup_socket_file(CONF.driver_agent.stats_socket_path)
def get_listener(exit_event):
_cleanup_socket_file(CONF.driver_agent.get_socket_path)
server = ForkingUDSServer(CONF.driver_agent.get_socket_path,
GetRequestHandler)
server.timeout = CONF.driver_agent.get_request_timeout
server.max_children = CONF.driver_agent.get_max_processes
while not exit_event.is_set():
server.handle_request()
LOG.info('Waiting for driver get listener to shutdown...')
# Can't shut ourselves down as we would deadlock, spawn a thread
threading.Thread(target=server.shutdown).start()
LOG.info('Driver get listener shutdown finished.')
server.server_close()
_cleanup_socket_file(CONF.driver_agent.get_socket_path)
LOG.info("UDS server was closed and socket was cleaned up.")
| openstack/octavia | octavia/api/drivers/driver_agent/driver_listener.py | Python | apache-2.0 | 5,587 |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperparameter sweeps and configs for stage 1 of "abstract_reasoning_study".
Are Disentangled Representations Helpful for Abstract Visual Reasoning?
Sjoerd van Steenkiste, Francesco Locatello, Juergen Schmidhuber, Olivier Bachem.
NeurIPS, 2019.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from disentanglement_lib.config import study
from disentanglement_lib.utils import resources
import disentanglement_lib.utils.hyperparams as h
from six.moves import range
def get_datasets():
"""Returns all the data sets."""
return h.sweep(
"dataset.name",
h.categorical(["shapes3d", "abstract_dsprites"]))
def get_num_latent(sweep):
return h.sweep("encoder.num_latent", h.discrete(sweep))
def get_seeds(num):
"""Returns random seeds."""
return h.sweep("model.random_seed", h.categorical(list(range(num))))
def get_default_models():
"""Our default set of models (6 model * 6 hyperparameters=36 models)."""
# BetaVAE config.
model_name = h.fixed("model.name", "beta_vae")
model_fn = h.fixed("model.model", "@vae()")
betas = h.sweep("vae.beta", h.discrete([1., 2., 4., 6., 8., 16.]))
config_beta_vae = h.zipit([model_name, betas, model_fn])
# AnnealedVAE config.
model_name = h.fixed("model.name", "annealed_vae")
model_fn = h.fixed("model.model", "@annealed_vae()")
iteration_threshold = h.fixed("annealed_vae.iteration_threshold", 100000)
c = h.sweep("annealed_vae.c_max", h.discrete([5., 10., 25., 50., 75., 100.]))
gamma = h.fixed("annealed_vae.gamma", 1000)
config_annealed_beta_vae = h.zipit(
[model_name, c, iteration_threshold, gamma, model_fn])
# FactorVAE config.
model_name = h.fixed("model.name", "factor_vae")
model_fn = h.fixed("model.model", "@factor_vae()")
discr_fn = h.fixed("discriminator.discriminator_fn", "@fc_discriminator")
gammas = h.sweep("factor_vae.gamma",
h.discrete([10., 20., 30., 40., 50., 100.]))
config_factor_vae = h.zipit([model_name, gammas, model_fn, discr_fn])
# DIP-VAE-I config.
model_name = h.fixed("model.name", "dip_vae_i")
model_fn = h.fixed("model.model", "@dip_vae()")
lambda_od = h.sweep("dip_vae.lambda_od",
h.discrete([1., 2., 5., 10., 20., 50.]))
lambda_d_factor = h.fixed("dip_vae.lambda_d_factor", 10.)
dip_type = h.fixed("dip_vae.dip_type", "i")
config_dip_vae_i = h.zipit(
[model_name, model_fn, lambda_od, lambda_d_factor, dip_type])
# DIP-VAE-II config.
model_name = h.fixed("model.name", "dip_vae_ii")
model_fn = h.fixed("model.model", "@dip_vae()")
lambda_od = h.sweep("dip_vae.lambda_od",
h.discrete([1., 2., 5., 10., 20., 50.]))
lambda_d_factor = h.fixed("dip_vae.lambda_d_factor", 1.)
dip_type = h.fixed("dip_vae.dip_type", "ii")
config_dip_vae_ii = h.zipit(
[model_name, model_fn, lambda_od, lambda_d_factor, dip_type])
# BetaTCVAE config.
model_name = h.fixed("model.name", "beta_tc_vae")
model_fn = h.fixed("model.model", "@beta_tc_vae()")
betas = h.sweep("beta_tc_vae.beta", h.discrete([1., 2., 4., 6., 8., 10.]))
config_beta_tc_vae = h.zipit([model_name, model_fn, betas])
all_models = h.chainit([
config_beta_vae, config_factor_vae, config_dip_vae_i, config_dip_vae_ii,
config_beta_tc_vae, config_annealed_beta_vae
])
return all_models
def get_config():
"""Returns the hyperparameter configs for different experiments."""
arch_enc = h.fixed("encoder.encoder_fn", "@conv_encoder", length=1)
arch_dec = h.fixed("decoder.decoder_fn", "@deconv_decoder", length=1)
architecture = h.zipit([arch_enc, arch_dec])
return h.product([
get_datasets(),
architecture,
get_default_models(),
get_seeds(5),
])
class AbstractReasoningStudyV1(study.Study):
"""Defines the study for the paper."""
def get_model_config(self, model_num=0):
"""Returns model bindings and config file."""
config = get_config()[model_num]
model_bindings = h.to_bindings(config)
model_config_file = resources.get_file(
"config/abstract_reasoning_study_v1/stage1/model_configs/shared.gin")
return model_bindings, model_config_file
def get_postprocess_config_files(self):
"""Returns postprocessing config files."""
return list(
resources.get_files_in_folder(
"config/abstract_reasoning_study_v1/stage1/postprocess_configs/"))
def get_eval_config_files(self):
"""Returns evaluation config files."""
return list(
resources.get_files_in_folder(
"config/abstract_reasoning_study_v1/stage1/metric_configs/"))
| google-research/disentanglement_lib | disentanglement_lib/config/abstract_reasoning_study_v1/stage1/sweep.py | Python | apache-2.0 | 5,267 |
"hey iam there yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuyyy"
| 6mandati6/6mandati6 | tt.py | Python | apache-2.0 | 135 |
# ./MARC21relaxed.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:5e592dacc0cf5bbbe827fb7d980f3324ca92c3dc
# Generated 2016-12-21 00:24:34.092428 by PyXB version 1.2.4 using Python 2.7.12.final.0
# Namespace http://www.loc.gov/MARC21/slim
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:773ffeee-c70b-11e6-9daf-00e1020040ea')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
#if pyxb.__version__ != _PyXBVersion:
# raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.loc.gov/MARC21/slim', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}recordTypeType
class recordTypeType (pyxb.binding.datatypes.NMTOKEN, pyxb.binding.basis.enumeration_mixin):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'recordTypeType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 63, 2)
_Documentation = None
recordTypeType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=recordTypeType, enum_prefix=None)
recordTypeType.Bibliographic = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Bibliographic', tag='Bibliographic')
recordTypeType.Authority = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Authority', tag='Authority')
recordTypeType.Holdings = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Holdings', tag='Holdings')
recordTypeType.Classification = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Classification', tag='Classification')
recordTypeType.Community = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Community', tag='Community')
recordTypeType._InitializeFacetMap(recordTypeType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'recordTypeType', recordTypeType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}leaderDataType
class leaderDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'leaderDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 82, 2)
_Documentation = None
leaderDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
leaderDataType._CF_pattern.addPattern(pattern='[\\dA-Za-z\\.| ]{24}')
leaderDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
leaderDataType._InitializeFacetMap(leaderDataType._CF_pattern,
leaderDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'leaderDataType', leaderDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}controlDataType
class controlDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controlDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 99, 2)
_Documentation = None
controlDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
controlDataType._InitializeFacetMap(controlDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'controlDataType', controlDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}controltagDataType
class controltagDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controltagDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 104, 2)
_Documentation = None
controltagDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
controltagDataType._CF_pattern.addPattern(pattern='[0-9A-Za-z]{3}')
controltagDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
controltagDataType._InitializeFacetMap(controltagDataType._CF_pattern,
controltagDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'controltagDataType', controltagDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}tagDataType
class tagDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tagDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 122, 2)
_Documentation = None
tagDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
tagDataType._CF_pattern.addPattern(pattern='(0([0-9A-Z][0-9A-Z])|0([1-9a-z][0-9a-z]))|(([1-9A-Z][0-9A-Z]{2})|([1-9a-z][0-9a-z]{2}))')
tagDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
tagDataType._InitializeFacetMap(tagDataType._CF_pattern,
tagDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'tagDataType', tagDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}indicatorDataType
class indicatorDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'indicatorDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 128, 2)
_Documentation = None
indicatorDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
indicatorDataType._CF_pattern.addPattern(pattern='[\\da-zA-Z_ ]{1}')
indicatorDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
indicatorDataType._InitializeFacetMap(indicatorDataType._CF_pattern,
indicatorDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'indicatorDataType', indicatorDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}subfieldDataType
class subfieldDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 142, 2)
_Documentation = None
subfieldDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
subfieldDataType._InitializeFacetMap(subfieldDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'subfieldDataType', subfieldDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}subfieldcodeDataType
class subfieldcodeDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldcodeDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 147, 2)
_Documentation = None
subfieldcodeDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
subfieldcodeDataType._CF_pattern.addPattern(pattern='[\\dA-Za-z!"#$%&\'()*+,-./:;<=>?{}_^`~\\[\\]\\\\]{1}')
subfieldcodeDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
subfieldcodeDataType._InitializeFacetMap(subfieldcodeDataType._CF_pattern,
subfieldcodeDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'subfieldcodeDataType', subfieldcodeDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}idDataType
class idDataType (pyxb.binding.datatypes.ID):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'idDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 154, 2)
_Documentation = None
idDataType._InitializeFacetMap()
Namespace.addCategoryObject('typeBinding', 'idDataType', idDataType)
# Complex type {http://www.loc.gov/MARC21/slim}collectionType with content type ELEMENT_ONLY
class collectionType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}collectionType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'collectionType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 46, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}record uses Python identifier record
__record = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'record'), 'record', '__httpwww_loc_govMARC21slim_collectionType_httpwww_loc_govMARC21slimrecord', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2), )
record = property(__record.value, __record.set, None, 'record is a top level container element for all of the field elements which compose the record')
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_collectionType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 50, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 50, 4)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
__record.name() : __record
})
_AttributeMap.update({
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'collectionType', collectionType)
# Complex type {http://www.loc.gov/MARC21/slim}recordType with content type ELEMENT_ONLY
class recordType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}recordType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'recordType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 52, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}leader uses Python identifier leader
__leader = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'leader'), 'leader', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimleader', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8), )
leader = property(__leader.value, __leader.set, None, None)
# Element {http://www.loc.gov/MARC21/slim}controlfield uses Python identifier controlfield
__controlfield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'controlfield'), 'controlfield', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimcontrolfield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8), )
controlfield = property(__controlfield.value, __controlfield.set, None, None)
# Element {http://www.loc.gov/MARC21/slim}datafield uses Python identifier datafield
__datafield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'datafield'), 'datafield', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimdatafield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8), )
datafield = property(__datafield.value, __datafield.set, None, None)
# Attribute type uses Python identifier type
__type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'type'), 'type', '__httpwww_loc_govMARC21slim_recordType_type', recordTypeType)
__type._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 60, 4)
__type._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 60, 4)
type = property(__type.value, __type.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_recordType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 61, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 61, 4)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
__leader.name() : __leader,
__controlfield.name() : __controlfield,
__datafield.name() : __datafield
})
_AttributeMap.update({
__type.name() : __type,
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'recordType', recordType)
# Complex type {http://www.loc.gov/MARC21/slim}leaderFieldType with content type SIMPLE
class leaderFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Leader, 24 bytes"""
_TypeDefinition = leaderDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'leaderFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 72, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is leaderDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_leaderFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 78, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 78, 8)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'leaderFieldType', leaderFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}controlFieldType with content type SIMPLE
class controlFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Fields 001-009"""
_TypeDefinition = controlDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controlFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 88, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is controlDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_controlFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 94, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 94, 8)
id = property(__id.value, __id.set, None, None)
# Attribute tag uses Python identifier tag
__tag = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tag'), 'tag', '__httpwww_loc_govMARC21slim_controlFieldType_tag', controltagDataType, required=True)
__tag._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 95, 8)
__tag._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 95, 8)
tag = property(__tag.value, __tag.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id,
__tag.name() : __tag
})
Namespace.addCategoryObject('typeBinding', 'controlFieldType', controlFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}dataFieldType with content type ELEMENT_ONLY
class dataFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Variable Data Fields 010-999"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'dataFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 110, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}subfield uses Python identifier subfield
__subfield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'subfield'), 'subfield', '__httpwww_loc_govMARC21slim_dataFieldType_httpwww_loc_govMARC21slimsubfield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6), )
subfield = property(__subfield.value, __subfield.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_dataFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 117, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 117, 4)
id = property(__id.value, __id.set, None, None)
# Attribute tag uses Python identifier tag
__tag = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tag'), 'tag', '__httpwww_loc_govMARC21slim_dataFieldType_tag', tagDataType, required=True)
__tag._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 118, 4)
__tag._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 118, 4)
tag = property(__tag.value, __tag.set, None, None)
# Attribute ind1 uses Python identifier ind1
__ind1 = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ind1'), 'ind1', '__httpwww_loc_govMARC21slim_dataFieldType_ind1', indicatorDataType, required=True)
__ind1._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 119, 4)
__ind1._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 119, 4)
ind1 = property(__ind1.value, __ind1.set, None, None)
# Attribute ind2 uses Python identifier ind2
__ind2 = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ind2'), 'ind2', '__httpwww_loc_govMARC21slim_dataFieldType_ind2', indicatorDataType, required=True)
__ind2._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 120, 4)
__ind2._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 120, 4)
ind2 = property(__ind2.value, __ind2.set, None, None)
_ElementMap.update({
__subfield.name() : __subfield
})
_AttributeMap.update({
__id.name() : __id,
__tag.name() : __tag,
__ind1.name() : __ind1,
__ind2.name() : __ind2
})
Namespace.addCategoryObject('typeBinding', 'dataFieldType', dataFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}subfieldatafieldType with content type SIMPLE
class subfieldatafieldType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}subfieldatafieldType with content type SIMPLE"""
_TypeDefinition = subfieldDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldatafieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 134, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is subfieldDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_subfieldatafieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 137, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 137, 8)
id = property(__id.value, __id.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__httpwww_loc_govMARC21slim_subfieldatafieldType_code', subfieldcodeDataType, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 138, 8)
__code._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 138, 8)
code = property(__code.value, __code.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id,
__code.name() : __code
})
Namespace.addCategoryObject('typeBinding', 'subfieldatafieldType', subfieldatafieldType)
record = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'record'), recordType, nillable=pyxb.binding.datatypes.boolean(1), documentation='record is a top level container element for all of the field elements which compose the record', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2))
Namespace.addCategoryObject('elementBinding', record.name().localName(), record)
collection = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'collection'), collectionType, nillable=pyxb.binding.datatypes.boolean(1), documentation='collection is a top level container element for 0 or many records', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 41, 2))
Namespace.addCategoryObject('elementBinding', collection.name().localName(), collection)
collectionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'record'), recordType, nillable=pyxb.binding.datatypes.boolean(1), scope=collectionType, documentation='record is a top level container element for all of the field elements which compose the record', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 47, 4))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(collectionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'record')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 48, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
collectionType._Automaton = _BuildAutomaton()
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'leader'), leaderFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8)))
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'controlfield'), controlFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8)))
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'datafield'), dataFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 54, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'leader')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'controlfield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'datafield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
recordType._Automaton = _BuildAutomaton_()
dataFieldType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'subfield'), subfieldatafieldType, scope=dataFieldType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6)))
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(dataFieldType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'subfield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
dataFieldType._Automaton = _BuildAutomaton_2()
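# Illustrative round-trip for the bindings above. The MARCXML fragment is made
# up purely for demonstration; any well-formed record in the MARC21/slim
# namespace should parse the same way.
if __name__ == "__main__":
    _example_xml = (
        '<record xmlns="http://www.loc.gov/MARC21/slim">'
        '<leader>00000nam a2200000 a 4500</leader>'
        '<controlfield tag="001">demo001</controlfield>'
        '<datafield tag="245" ind1="0" ind2="0">'
        '<subfield code="a">An example title</subfield>'
        '</datafield>'
        '</record>'
    )
    _rec = CreateFromDocument(_example_xml)
    print('%s %s' % (_rec.controlfield[0].tag, _rec.datafield[0].subfield[0].code))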
| PixelDragon/pixeldragon | MARC21relaxed.py | Python | apache-2.0 | 30,253 |
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import data_models
class Interface(data_models.BaseDataModel):
def __init__(self, id=None, compute_id=None, network_id=None,
fixed_ips=None, port_id=None):
self.id = id
self.compute_id = compute_id
self.network_id = network_id
self.port_id = port_id
self.fixed_ips = fixed_ips
class Delta(data_models.BaseDataModel):
def __init__(self, amphora_id=None, compute_id=None,
add_nics=None, delete_nics=None):
self.compute_id = compute_id
self.amphora_id = amphora_id
self.add_nics = add_nics
self.delete_nics = delete_nics
class Network(data_models.BaseDataModel):
def __init__(self, id=None, name=None, subnets=None,
project_id=None, admin_state_up=None, mtu=None,
provider_network_type=None,
provider_physical_network=None,
provider_segmentation_id=None,
router_external=None):
self.id = id
self.name = name
self.subnets = subnets
self.project_id = project_id
self.admin_state_up = admin_state_up
self.provider_network_type = provider_network_type
self.provider_physical_network = provider_physical_network
self.provider_segmentation_id = provider_segmentation_id
self.router_external = router_external
self.mtu = mtu
class Subnet(data_models.BaseDataModel):
def __init__(self, id=None, name=None, network_id=None, project_id=None,
gateway_ip=None, cidr=None, ip_version=None):
self.id = id
self.name = name
self.network_id = network_id
self.project_id = project_id
self.gateway_ip = gateway_ip
self.cidr = cidr
self.ip_version = ip_version
class Port(data_models.BaseDataModel):
def __init__(self, id=None, name=None, device_id=None, device_owner=None,
mac_address=None, network_id=None, status=None,
project_id=None, admin_state_up=None, fixed_ips=None,
network=None):
self.id = id
self.name = name
self.device_id = device_id
self.device_owner = device_owner
self.mac_address = mac_address
self.network_id = network_id
self.status = status
self.project_id = project_id
self.admin_state_up = admin_state_up
self.fixed_ips = fixed_ips or []
self.network = network
def get_subnet_id(self, fixed_ip_address):
for fixed_ip in self.fixed_ips:
if fixed_ip.ip_address == fixed_ip_address:
return fixed_ip.subnet_id
class FixedIP(data_models.BaseDataModel):
def __init__(self, subnet_id=None, ip_address=None, subnet=None):
self.subnet_id = subnet_id
self.ip_address = ip_address
self.subnet = subnet
class AmphoraNetworkConfig(data_models.BaseDataModel):
def __init__(self, amphora=None, vip_subnet=None, vip_port=None,
vrrp_subnet=None, vrrp_port=None, ha_subnet=None,
ha_port=None):
self.amphora = amphora
self.vip_subnet = vip_subnet
self.vip_port = vip_port
self.vrrp_subnet = vrrp_subnet
self.vrrp_port = vrrp_port
self.ha_subnet = ha_subnet
self.ha_port = ha_port
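# Minimal self-contained sketch of the Port.get_subnet_id() lookup above; the
# identifiers and addresses are made-up example values.
if __name__ == "__main__":
    _port = Port(id="port-1",
                 fixed_ips=[FixedIP(subnet_id="subnet-1", ip_address="192.0.2.5")])
    assert _port.get_subnet_id("192.0.2.5") == "subnet-1"
    assert _port.get_subnet_id("192.0.2.99") is None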
| jiahaoliang/group-based-policy | gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py | Python | apache-2.0 | 4,043 |
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import simplejson as json
import ner
def index(request):
params = {'current': 'home'}
return render(request, 'index.html', params)
def name_entity_recognition(request):
if request.method == 'GET':
#Get the array that contains the list of texts to recognize
input_text_array = request.GET.getlist('text[]')
        data = {}
        for i, text in enumerate(input_text_array):
            #Recognize all strings / texts contained in the array
            data[i] = ner.recognize(text.strip())
return HttpResponse(json.dumps(data), content_type = "application/json") | smouzakitis/molly | molly/views.py | Python | apache-2.0 | 711 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import redact
GCLOUD_PROJECT = os.getenv("GOOGLE_CLOUD_PROJECT")
RESOURCE_DIRECTORY = os.path.join(os.path.dirname(__file__), "resources")
@pytest.fixture(scope="module")
def tempdir():
tempdir = tempfile.mkdtemp()
yield tempdir
shutil.rmtree(tempdir)
def test_redact_image_file(tempdir, capsys):
test_filepath = os.path.join(RESOURCE_DIRECTORY, "test.png")
output_filepath = os.path.join(tempdir, "redacted.png")
redact.redact_image(
GCLOUD_PROJECT, test_filepath, output_filepath, ["FIRST_NAME", "EMAIL_ADDRESS"],
)
out, _ = capsys.readouterr()
assert output_filepath in out
def test_redact_image_all_text(tempdir, capsys):
test_filepath = os.path.join(RESOURCE_DIRECTORY, "test.png")
output_filepath = os.path.join(tempdir, "redacted.png")
redact.redact_image_all_text(
GCLOUD_PROJECT, test_filepath, output_filepath,
)
out, _ = capsys.readouterr()
assert output_filepath in out
| googleapis/python-dlp | samples/snippets/redact_test.py | Python | apache-2.0 | 1,601 |
class RegisterPair:
def __init__(self, name, register_high, register_low):
self.name = name
self.register_high = register_high
self.register_low = register_low
| AlexLitvino/i8080_simulator | microprocessor/register_pair.py | Python | apache-2.0 | 189 |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" flowlabels are abbreviations that can be used to identify a flow. Flows do
not have a single unique attribute, which makes them difficult to identify.
flowlabels solve that problem.
flowlabels have 2 parts:
origin node index
destination node index
Example:
flowlabel 1_2 means the flow from the node at index 1 to the node at index 2
"""
import re
def parse_flowlabel(flowlabel):
""" Parses a flowlabel into a tuple """
    result = re.findall(r"(^\d+)(_)(\d+$)", flowlabel)
    if len(result) == 0:
        raise Exception("Invalid flowlabel %s" % flowlabel)
return (int(result[0][0]), int(result[0][2]))
def gen_flowlabel(origin_index, destination_index):
""" generate a flowlabel """
return "%d_%d"%(origin_index, destination_index)
| viccro/diarc | fabrik/flowlabel.py | Python | apache-2.0 | 1,392 |
"""
Ecks plugin to collect system memory usage information
Copyright 2011 Chris Read ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def get_memory(parent, host, community):
""" This is a plugin to be loaded by Ecks
return a tuple containing (total_swap, avail_swap, total_real, avail_real, mem_buffer, mem_cached). Values are in kiloBytes
"""
memory = (1,3,6,1,4,1,2021,4) # UCD-SNMP-MIB
data = parent.get_snmp_data(host, community, memory, 1)
if data:
return map(parent._build_answer,
parent._extract(data, int, 3),
parent._extract(data, int, 4),
parent._extract(data, int, 5),
parent._extract(data, int, 6),
parent._extract(data, int, 14),
parent._extract(data, int, 15),
)[0]
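# Illustrative call, assuming an Ecks instance and an SNMP-enabled host (the
# host address and community string below are hypothetical):
#
#     total_swap, avail_swap, total_real, avail_real, buffers, cached = \
#         get_memory(ecks_instance, "192.0.2.10", "public")
#
# All six values come from the UCD-SNMP-MIB memory table and are in kilobytes.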
| cread/ecks | ecks/plugins/memory.py | Python | apache-2.0 | 1,335 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-09-14 23:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organization', '0004_teacher_image'),
('courses', '0006_auto_20170914_2345'),
]
operations = [
migrations.AddField(
model_name='course',
name='teacher',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u8bb2\u5e08'),
),
]
| LennonChin/Django-Practices | MxOnline/apps/courses/migrations/0007_course_teacher.py | Python | apache-2.0 | 640 |
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, resource, serializers
from .provisioner import Provisioner
class Output(resource.Resource):
resource_name = "output"
name = argument.String()
provisioner = argument.Resource(Provisioner)
class OutputAsString(serializers.Serializer):
def __init__(self, resource):
self.resource = resource
def render(self, runner, object):
if self.pending(runner, object):
return serializers.Pending(self.resource)
# Extract the contents from the file on the (potentially remote) target
service = runner.get_service(self.resource.provisioner.target, "describe")
client = service.get_client()
return client.get_path_contents(self.resource.name)
def pending(self, runner, object):
provisioner = runner.get_service(self.resource.provisioner, "apply")
return provisioner.object["Result"] == "Pending"
def dependencies(self, object):
return frozenset((self.resource,))
argument.String.register_adapter(Output, lambda r: OutputAsString(r))
class OutputAsBytes(serializers.Serializer):
def __init__(self, resource):
self.resource = resource
def render(self, runner, object):
if self.pending(runner, object):
return serializers.Pending(self.resource)
# Extract the contents from the file on the (potentially remote) target
service = runner.get_service(self.resource.provisioner.target, "describe")
client = service.get_client()
return client.get_path_bytes(self.resource.name)
def pending(self, runner, object):
provisioner = runner.get_service(self.resource.provisioner, "apply")
return provisioner.object["Result"] == "Pending"
def dependencies(self, object):
return frozenset((self.resource,))
argument.Bytes.register_adapter(Output, lambda r: OutputAsBytes(r))
| yaybu/touchdown | touchdown/provisioner/output.py | Python | apache-2.0 | 2,488 |
from webfs import WebDirParser
testDoc = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /ubuntu</title>
</head>
<body>
<h1>Index of /ubuntu</h1>
<pre><img src="/icons/blank.gif" alt="Icon "> <a href="?C=N;O=D">Name</a> <a href="?C=M;O=A">Last modified</a> <a href="?C=S;O=A">Size</a> <a href="?C=D;O=A">Description</a><hr><img src="/icons/back.gif" alt="[DIR]"> <a href="/">Parent Directory</a> -
<img src="/icons/folder.gif" alt="[DIR]"> <a href="dists/">dists/</a> 18-Jun-2014 12:46 -
<img src="/icons/folder.gif" alt="[DIR]"> <a href="indices/">indices/</a> 28-Apr-2008 17:47 -
<img src="/icons/compressed.gif" alt="[ ]"> <a href="ls-lR.gz">ls-lR.gz</a> 28-Apr-2008 16:05 4.5M
<img src="/icons/folder.gif" alt="[DIR]"> <a href="pool/">pool/</a> 14-Jan-2008 22:05 -
<img src="/icons/folder.gif" alt="[DIR]"> <a href="project/">project/</a> 28-Jun-2013 11:52 -
<hr></pre>
<address>Apache/2.2.22 (Ubuntu) Server at old-releases.ubuntu.com Port 80</address>
</body></html>
"""
def Test_ParsingTest():
wp = WebDirParser()
wp.feed(testDoc)
assert len(wp.entries) == 5
    # list.sort() returns None, so the original comparison was always vacuously
    # true; compare sorted copies so the assertion actually checks the names.
    assert sorted(wp.entries.keys()) == sorted(['dists', 'indices', 'ls-lR.gz', 'pool', 'project']),\
        wp.entries.keys() | harun-emektar/webfs | tests/Test_WebDirParser.py | Python | apache-2.0 | 1416 |
from bacnet import BACNetTransform
from cwmp import CWMPTransform
from dns import DNSTransform
from ftp import FTPTransform
from http import HTTPTransform
from http import HTTPWWWTransform
from https import HTTPSTransform
from https import HTTPSGetTransform
from https import HTTPSWWWTransform
from https import HeartbleedTransform
from https import RSAExportTransform
from https import DHETransform
from https import DHEExportTransform
from https import ECDHETransform
from https import TLSv10Transform
from https import TLSv11Transform
from https import TLSv12Transform
from https import TLSv13Transform
from https import SSLv3Transform
from imap import IMAPStartTLSTransform
from imap import IMAPSTransform
from modbus import ModbusTransform
from ntp import NTPTransform
from pop3 import POP3StartTLSTransform
from pop3 import POP3STransform
from s7 import S7Transform
from smtp import SMTPStartTLSTransform
from smtp import SMTPSTransform
from ssh import SSHV2Transform
from telnet import TelnetTransform
from upnp import UPnPTransform
from fox import NiagaraFoxTransform
from dnp3 import DNP3Transform
from sslv2 import SSLv2Transform
from smb import SMBTransform
from oracle import OracleTransform
from postgres import PostgresTransform
from mongodb import MongoDBTransform
from mssql import MSSQLTransform
from mysql import MySQLTransform
from ipp import IPPTransform | zmap/ztag | ztag/transforms/__init__.py | Python | apache-2.0 | 1,377 |
"""
@author: ArcGIS for Intelligence
@contact: [email protected]
@company: Esri
@version: 1.0
@description: Used to stage the apps for Movement Analysis
@requirements: Python 2.7.x, ArcGIS 10.3.1
@copyright: Esri, 2015
"""
import arcresthelper
from arcresthelper import portalautomation
log_file='./logs/DamageAssessment.log'
configFiles= ['./configs/StageApp.json']
globalLoginInfo = './configs/GlobalLoginInfo.json'
dateTimeFormat = '%Y-%m-%d %H:%M'
pa = portalautomation.portalautomation(globalLoginInfo)
pa.setLog(log_file=log_file)
pa.publishfromconfig(configFiles=configFiles,
combinedApp=None,
dateTimeFormat=dateTimeFormat)
del pa | conklinbd/MovementAnalysis | TemplateInstall/PortalDeploy/StageApp.py | Python | apache-2.0 | 726 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traitsui.api import View, Item, VGroup, InstanceEditor, UItem, EnumEditor, \
RangeEditor, spring, HGroup, Group, ButtonEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.core.ui.led_editor import LEDEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.lasers.tasks.laser_panes import ClientPane
class AblationCO2ClientPane(ClientPane):
def trait_context(self):
ctx = super(AblationCO2ClientPane, self).trait_context()
ctx['tray_calibration'] = self.model.stage_manager.tray_calibration_manager
ctx['stage_manager'] = self.model.stage_manager
return ctx
def traits_view(self):
pos_grp = VGroup(UItem('move_enabled_button',
editor=ButtonEditor(label_value='move_enabled_label')),
VGroup(HGroup(Item('position'),
UItem('stage_manager.stage_map_name',
editor=EnumEditor(name='stage_manager.stage_map_names')),
UItem('stage_stop_button')),
Item('x', editor=RangeEditor(low_name='stage_manager.xmin',
high_name='stage_manager.xmax')),
Item('y', editor=RangeEditor(low_name='stage_manager.ymin',
high_name='stage_manager.ymax')),
Item('z', editor=RangeEditor(low_name='stage_manager.zmin',
high_name='stage_manager.zmax')),
enabled_when='_move_enabled'),
label='Positioning')
calibration_grp = VGroup(UItem('tray_calibration.style',
enabled_when='not tray_calibration.isCalibrating()'),
UItem('tray_calibration.calibrate',
editor=ButtonEditor(label_value='tray_calibration.calibration_step')),
HGroup(Item('tray_calibration.cx', format_str='%0.3f', style='readonly'),
Item('tray_calibration.cy', format_str='%0.3f', style='readonly')),
Item('tray_calibration.rotation', format_str='%0.3f', style='readonly'),
Item('tray_calibration.scale', format_str='%0.4f', style='readonly'),
Item('tray_calibration.error', format_str='%0.2f', style='readonly'),
UItem('tray_calibration.calibrator', style='custom', editor=InstanceEditor()),
CustomLabel('tray_calibration.calibration_help',
color='green',
height=75, width=300),
label='Tray Calibration')
tgrp = Group(pos_grp, calibration_grp, layout='tabbed')
egrp = HGroup(UItem('enabled', editor=LEDEditor(colors=['red', 'green'])),
UItem('enable', editor=ButtonEditor(label_value='enable_label')),
UItem('fire_laser_button', editor=ButtonEditor(label_value='fire_label'),
enabled_when='enabled'),
Item('output_power', label='Power'),
UItem('units'),
spring,
icon_button_editor('snapshot_button', 'camera'),
icon_button_editor('test_connection_button',
'connect', tooltip='Test Connection'))
v = View(VGroup(egrp, tgrp))
return v
# ============= EOF =============================================
| UManPychron/pychron | pychron/lasers/tasks/panes/ablation.py | Python | apache-2.0 | 4,882 |
# -*- coding: utf-8 -*-
__author__ = 'eveliotc'
__license__ = 'See LICENSE'
import alfred
from alfred import Item
import sys
from subprocess import Popen, PIPE
def json_to_obj(x):
if isinstance(x, dict):
return type('X', (), {k: json_to_obj(v) for k, v in x.iteritems()})
else:
return x
def join_query(dic):
return ' '.join(dic)
def le_result(r, exit = True):
alfred.write(r)
if exit:
sys.exit()
def xml_result(r, exit = True):
if len(r) < 1:
empty_result(exit)
else:
le_result(alfred.xml(r), exit)
def empty_result(exit = True):
empty = Item(
attributes={'uid': alfred.uid('empty'), 'arg': ''},
title='Gradle Please',
subtitle=u':( Nothing found.',
icon=u'icon.png')
xml_result([empty], exit)
def apple_script(scpt, args=[]):
p = Popen(['osascript', '-'] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(scpt)
return stdout
def tell_alfred(what):
apple_script('tell application "Alfred 2" to search "%s"' % what)
# TODO refactor gp.py to use this instead of dynamic obj
class Pom(object):
a = ''
g = ''
p = ''
latestVersion = ''
source = ''
@property
def id(self):
return self.g + ':' + self.a
def __repr__(self):
#notjson #justdebugginthings
return '{id:%s a:%s g:%s p:%s v:%s}' % (self.id, self.a, self.g, self.p, self.latestVersion)
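# Tiny usage sketch of the Pom container above; the coordinates are made up.
if __name__ == '__main__':
    pom = Pom()
    pom.g, pom.a, pom.latestVersion = 'com.example', 'widget', '1.2.3'
    assert pom.id == 'com.example:widget'
    print(pom)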
| eveliotc/gradleplease-workflow | common.py | Python | apache-2.0 | 1,475 |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flash_kinetis import Flash_Kinetis
flash_algo = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x4831b510, 0x6041492f, 0x60814930, 0x22806801, 0x22204391, 0x60014311, 0x4448482d, 0xf8cef000,
0xd0002800, 0xbd102001, 0x47702000, 0xb5104828, 0x44484928, 0xf8aef000, 0xd1050004, 0x21004824,
0xf0004448, 0x4604f983, 0xf835f000, 0xbd104620, 0x4d1fb570, 0x444d4606, 0x4b1e4601, 0x68ea4628,
0xf85ef000, 0xd1060004, 0x46312300, 0x68ea4628, 0xf934f000, 0xf0004604, 0x4620f81e, 0xb5febd70,
0x460b460d, 0x46014607, 0x46164811, 0xf0004448, 0x0004f8f5, 0x9001d10b, 0x21019002, 0x9100480c,
0x462a4633, 0x44484639, 0xf95ef000, 0xf0004604, 0x4620f802, 0x4808bdfe, 0x220168c1, 0x43110292,
0x477060c1, 0xd928c520, 0x40076000, 0x0000ffff, 0x00000004, 0x6b65666b, 0xf0003000, 0x2800b500,
0x2a00d009, 0x000bd007, 0xfa35f000, 0x0b0b0708, 0x13110f0d, 0x20041715, 0x68c0bd00, 0x20006010,
0x6840bd00, 0x6880e7fa, 0x6800e7f8, 0x2001e7f6, 0x6900e7f4, 0x6940e7f2, 0x206ae7f0, 0x0000bd00,
0x4607b5f8, 0x460d4614, 0xf0004618, 0x2800f889, 0x2308d12a, 0x46294622, 0xf0004638, 0x0006f867,
0x192cd122, 0x68f91e64, 0x91004620, 0xf956f000, 0xd0162900, 0x1c409c00, 0x1e644344, 0x480be011,
0x68004478, 0x490a6005, 0x71c82009, 0xf92ef000, 0x69b84606, 0xd0002800, 0x2e004780, 0x68f8d103,
0x42a51945, 0x4630d9eb, 0x0000bdf8, 0x0000042c, 0x40020000, 0x4604b510, 0xf0004608, 0x2800f851,
0x2c00d106, 0x4904d005, 0x71c82044, 0xf90ef000, 0x2004bd10, 0x0000bd10, 0x40020000, 0x2800b510,
0x492ad019, 0x4a2a68c9, 0x00490e09, 0x5a51447a, 0xd0120309, 0x60022200, 0x21026041, 0x02896081,
0x492460c1, 0x158b7a0c, 0x610340a3, 0x61827ac9, 0x46106141, 0x2004bd10, 0x2064bd10, 0x2800bd10,
0x6181d002, 0x47702000, 0x47702004, 0x2800b510, 0x1e5bd004, 0x421c460c, 0xe001d104, 0xbd102004,
0xd001421a, 0xbd102065, 0x428b6803, 0x6840d804, 0x18181889, 0xd2014288, 0xbd102066, 0xbd102000,
0x4288490d, 0x206bd001, 0x20004770, 0x28004770, 0x290fd008, 0x2a04d802, 0xe005d104, 0xd8012913,
0xd0012a08, 0x47702004, 0x47702000, 0x40075040, 0x000003a0, 0x40020020, 0x6b65666b, 0xb081b5ff,
0x0015461e, 0xd007460f, 0x46322304, 0xf7ff9801, 0x0004ffbd, 0xe018d101, 0xb0052004, 0x480dbdf0,
0x68014478, 0xcd02600f, 0x60416800, 0x2006490a, 0xf00071c8, 0x4604f88b, 0x69809801, 0xd0002800,
0x2c004780, 0x1d3fd103, 0x2e001f36, 0x4620d1e7, 0x0000e7e3, 0x000002ec, 0x40020000, 0xb081b5ff,
0x460e4614, 0x23084605, 0xff90f7ff, 0xd1272800, 0x686868a9, 0xf882f000, 0x42719000, 0x40014240,
0x42b5424d, 0x9800d101, 0x2c00182d, 0x1bafd017, 0xd90042a7, 0x480b4627, 0x447808f9, 0x60066800,
0x22014809, 0x0a0a71c2, 0x728172c2, 0x72419904, 0xf84cf000, 0xd1032800, 0x19f61be4, 0x2000e7e3,
0xbdf0b005, 0x00000272, 0x40020000, 0x2800b510, 0x4804d006, 0x71c22240, 0xf0007181, 0xbd10f837,
0xbd102004, 0x40020000, 0x9f08b5f8, 0x4616001c, 0xd005460d, 0xf7ff2304, 0x2800ff49, 0xe01dd101,
0xbdf82004, 0x4478480f, 0x600d6801, 0x2202490e, 0x9a0671ca, 0x680072ca, 0x60816821, 0xf816f000,
0xd0082800, 0x29009907, 0x600dd000, 0xd0e82f00, 0x60392100, 0x1f36bdf8, 0x1d2d1d24, 0xd1e12e00,
0x0000bdf8, 0x00000206, 0x40020000, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800,
0x2067d501, 0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x09032200,
0xd32c428b, 0x428b0a03, 0x2300d311, 0xe04e469c, 0x430b4603, 0x2200d43c, 0x428b0843, 0x0903d331,
0xd31c428b, 0x428b0a03, 0x4694d301, 0x09c3e03f, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b,
0x1ac0018b, 0x09434152, 0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152,
0xd301428b, 0x1ac000cb, 0x08834152, 0xd301428b, 0x1ac0008b, 0x08434152, 0xd301428b, 0x1ac0004b,
0x1a414152, 0x4601d200, 0x46104152, 0xe05d4770, 0xd0000fca, 0x10034249, 0x4240d300, 0x22004053,
0x0903469c, 0xd32d428b, 0x428b0a03, 0x22fcd312, 0xba120189, 0x428b0a03, 0x0189d30c, 0x428b1192,
0x0189d308, 0x428b1192, 0x0189d304, 0x1192d03a, 0x0989e000, 0x428b09c3, 0x01cbd301, 0x41521ac0,
0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301, 0x41521ac0, 0x428b0903, 0x010bd301,
0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883, 0x008bd301, 0x41521ac0, 0x0843d2d9,
0xd301428b, 0x1ac0004b, 0x1a414152, 0x4601d200, 0x41524663, 0x4610105b, 0x4240d301, 0xd5002b00,
0x47704249, 0x105b4663, 0x4240d300, 0x2000b501, 0x46c046c0, 0xb430bd02, 0x1e644674, 0x1c647825,
0xd20042ab, 0x5d63461d, 0x18e3005b, 0x4718bc30, 0x00040002, 0x00080000, 0x00100000, 0x00200000,
0x00400000, 0x00000080, 0x00000000, 0x00800000, 0x40020004, 0x00000000,
],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x2000004D,
'pc_erase_sector' : 0x20000071,
'pc_program_page' : 0x2000009F,
'begin_stack' : 0x20000800,
'begin_data' : 0x20000a00, # Analyzer uses a max of 2 KB data (512 pages * 4 bytes / page)
'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering
'static_base' : 0x20000000 + 0x20 + 0x594,
'page_size' : 2048,
'analyzer_supported' : True,
'analyzer_address' : 0x1fffa000
};
# @brief Flash algorithm for Kinetis L-series devices.
class Flash_kl28z(Flash_Kinetis):
def __init__(self, target):
super(Flash_kl28z, self).__init__(target, flash_algo)
| geky/pyOCD | pyOCD/flash/flash_kl28z.py | Python | apache-2.0 | 6,139 |
from nativedroid.protobuf.java_signatures_pb2 import *
__author__ = "Fengguo Wei"
__copyright__ = "Copyright 2018, The Argus-SAF Project"
__license__ = "Apache v2.0"
def java_package_str(java_package_pb, delimiter):
"""
Return full string of a java package proto.
:param JavaPackage java_package_pb: java_signatures_pb2.JavaPackage
:param str delimiter:
:return: str
"""
pkg_str = java_package_pb.name
tmp = java_package_pb
while tmp.HasField('parent'):
tmp = tmp.parent
pkg_str = tmp.name + delimiter + pkg_str
return pkg_str
def primitive_type_str(primitive_type_pb, is_signature):
"""
Return full string of a primitive type proto.
:param PrimitiveType primitive_type_pb: java_signatures_pb2.PrimitiveType
:param bool is_signature: normal form int, signature form I
:return: str
"""
if primitive_type_pb.type == PrimitiveType.BYTE:
if is_signature:
return 'B'
else:
return 'byte'
elif primitive_type_pb.type == PrimitiveType.SHORT:
if is_signature:
return 'S'
else:
return 'short'
elif primitive_type_pb.type == PrimitiveType.INT:
if is_signature:
return 'I'
else:
return 'int'
elif primitive_type_pb.type == PrimitiveType.FLOAT:
if is_signature:
return 'F'
else:
return 'float'
elif primitive_type_pb.type == PrimitiveType.BOOLEAN:
if is_signature:
return 'Z'
else:
return 'boolean'
elif primitive_type_pb.type == PrimitiveType.CHAR:
if is_signature:
return 'C'
else:
return 'char'
elif primitive_type_pb.type == PrimitiveType.LONG:
if is_signature:
            # JVM-style descriptors use 'J' for long ('L' introduces a class type).
            return 'J'
else:
return 'long'
elif primitive_type_pb.type == PrimitiveType.DOUBLE:
if is_signature:
return 'D'
else:
return 'double'
def class_type_str(class_type_pb, is_signature):
"""
Return full string of a class type proto.
:param ClassType class_type_pb: java_signatures_pb2.ClassType
:param bool is_signature: normal form java.lang.Object, signature form Ljava/lang/Object;
:return: str
"""
type_str = class_type_pb.name
if is_signature:
delimiter = '/'
else:
delimiter = '.'
if class_type_pb.HasField('package'):
type_str = java_package_str(class_type_pb.package, delimiter) + delimiter + type_str
if class_type_pb.unknown:
type_str += '?'
if is_signature:
type_str = 'L' + type_str + ';'
return type_str
def java_type_str(java_type_pb, is_signature):
"""
Return full string of a java type proto.
:param JavaType java_type_pb: java_signatures_pb2.JavaType
:param bool is_signature: normal form java.lang.Object[], signature form [Ljava/lang/Object;
:return: str
"""
if java_type_pb.HasField('primitive_type'):
type_str = primitive_type_str(java_type_pb.primitive_type, is_signature)
else:
type_str = class_type_str(java_type_pb.class_type, is_signature)
dimension = java_type_pb.dimension
while dimension > 0:
if is_signature:
type_str = '[' + type_str
else:
type_str += '[]'
dimension -= 1
return type_str
def method_proto_str(method_proto_pb):
"""
Return full string of a method proto proto.
:param MethodProto method_proto_pb: java_signatures_pb2.MethodProto
:return: str
"""
proto = '('
for param in method_proto_pb.param_types:
proto += java_type_str(param, True)
proto += ')'
if method_proto_pb.HasField('return_java_type'):
proto += java_type_str(method_proto_pb.return_java_type, True)
else:
proto += 'V'
return proto
def method_signature_str(method_signature_pb):
"""
Return full string of a method signature proto.
:param MethodSignature method_signature_pb: java_signatures_pb2.MethodSignature
:return: str
"""
owner_str = java_type_str(method_signature_pb.owner, is_signature=True)
proto_str = method_proto_str(method_signature_pb.proto)
return owner_str + '.' + method_signature_pb.name + ':' + proto_str
def get_params_from_method_signature(method_signature_pb, is_static):
"""
Get parameter types from method signature.
:param MethodSignature method_signature_pb: java_signatures_pb2.MethodSignature
:param bool is_static: is static method
:return: list of JavaType
"""
param_types = []
if not is_static:
param_types.append(method_signature_pb.owner)
param_types.extend(method_signature_pb.proto.param_types)
return param_types
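# A minimal sketch of how the helpers above assemble a JVM-style signature
# string. The class/method names are made up for illustration, and the proto
# constructor keywords simply mirror the field accesses used in the functions
# above (assumptions, not taken from the .proto file itself).
if __name__ == '__main__':
    owner = JavaType(
        class_type=ClassType(
            name='Object',
            package=JavaPackage(name='lang', parent=JavaPackage(name='java'))))
    proto = MethodProto(
        param_types=[JavaType(primitive_type=PrimitiveType(type=PrimitiveType.INT))],
        return_java_type=JavaType(
            class_type=ClassType(
                name='String',
                package=JavaPackage(name='lang', parent=JavaPackage(name='java')))))
    sig = MethodSignature(owner=owner, name='foo', proto=proto)
    # Expected output, roughly: Ljava/lang/Object;.foo:(I)Ljava/lang/String;
    print(method_signature_str(sig))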
| arguslab/Argus-SAF | nativedroid/nativedroid/jawa/utils.py | Python | apache-2.0 | 4,799 |
class Solution:
def uniquePathsIII(self, grid: 'List[List[int]]') -> 'int':
self.res = 0
        # E counts the moves still required: one per empty square plus one
        # final move onto the end square.
        R, C, E = len(grid), len(grid[0]), 1
        for i in range(R):
            for j in range(C):
                if grid[i][j] == 1: x,y = (i, j)      # start square
                elif grid[i][j] == 2: end = (i, j)    # end square
                elif grid[i][j] == 0: E += 1          # walkable empty square
        def dfs(x, y, E):
            # Stop at out-of-bounds, obstacle (-1) or already-visited (-2) squares.
            if not (0 <= x < R and 0 <= y < C and grid[x][y] >= 0): return
            # A path counts only if it reaches the end having used every square.
            if (x,y) == end and E == 0:
                self.res += 1
                return
            grid[x][y] = -2   # mark visited
            dfs(x+1, y, E-1)
            dfs(x-1, y, E-1)
            dfs(x, y+1, E-1)
            dfs(x, y-1, E-1)
            grid[x][y] = 0    # backtrack
dfs(x, y, E)
return self.res
s = Solution()
print(s.uniquePathsIII([[1,0,0,0],[0,0,0,0],[0,0,2,-1]]))
print(s.uniquePathsIII([[1,0,0,0],[0,0,0,0],[0,0,0,2]]))
print(s.uniquePathsIII([[0,1],[2,0]]))
| zuun77/givemegoogletshirts | leetcode/python/980_unique_paths_III.py | Python | apache-2.0 | 938 |
#!/usr/bin/python
# Copyright (c) 2014 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
version = "__VERSION__"
setup(
name="swift_undelete",
version=version,
description='Undelete middleware for OpenStack Swift',
license='Apache License (2.0)',
author='Samuel N. Merritt',
author_email='[email protected]',
url='https://github.com/swiftstack/swift_undelete',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Environment :: No Input/Output (Daemon)'],
# Ubuntu packaging incorrectly detects this as a dependency on the
# "python-swift" package, which SwiftStack doesn't use. So commenting this
# out so SwiftStack can still use ${python:Depends}
#install_requires=["swift"],
test_suite='nose.collector',
tests_require=["nose"],
scripts=[],
entry_points={
'paste.filter_factory': ['undelete=swift_undelete:filter_factory']})
| caiobrentano/swift_undelete | setup.py | Python | apache-2.0 | 1,663 |
import os
import time
import logging
import math
from parsl.channels import LocalChannel
from parsl.launchers import SingleNodeLauncher
from parsl.providers.cluster_provider import ClusterProvider
from parsl.providers.lsf.template import template_string
from parsl.providers.provider_base import JobState, JobStatus
from parsl.utils import RepresentationMixin, wtime_to_minutes
logger = logging.getLogger(__name__)
translate_table = {
'PEND': JobState.PENDING,
'RUN': JobState.RUNNING,
'DONE': JobState.COMPLETED,
'EXIT': JobState.FAILED, # (failed),
'PSUSP': JobState.CANCELLED,
'USUSP': JobState.CANCELLED,
'SSUSP': JobState.CANCELLED,
}
class LSFProvider(ClusterProvider, RepresentationMixin):
"""LSF Execution Provider
    This provider uses bsub to submit, bjobs for status and bkill to cancel
    jobs. The submit script passed to bsub is created from a template file in this
    same module. A minimal configuration sketch is included at the end of this
    module.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~parsl.channels.LocalChannel` (the default),
:class:`~parsl.channels.SSHChannel`, or
:class:`~parsl.channels.SSHInteractiveLoginChannel`.
nodes_per_block : int
Nodes to provision per block.
When request_by_nodes is False, it is computed by cores_per_block / cores_per_node.
cores_per_block : int
Cores to provision per block. Enabled only when request_by_nodes is False.
cores_per_node: int
Cores to provision per node. Enabled only when request_by_nodes is False.
init_blocks : int
Number of blocks to request at the start of the run.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
project : str
Project to which the resources must be charged
queue : str
Queue to which to submit the job request
scheduler_options : str
        String to prepend to the #BSUB blocks in the submit script to the scheduler.
worker_init : str
Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
cmd_timeout : int
Seconds after which requests to the scheduler will timeout. Default: 120s
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~parsl.launchers.SingleNodeLauncher` (the default),
:class:`~parsl.launchers.SrunLauncher`, or
:class:`~parsl.launchers.AprunLauncher`
    move_files : Optional[Bool]
        Should files be moved? By default, Parsl will try to move files.
    bsub_redirection: Bool
        Should a redirection symbol "<" be included when submitting jobs, i.e., bsub < job_script.
request_by_nodes: Bool
Request by nodes or request by cores per block.
When this is set to false, nodes_per_block is computed by cores_per_block / cores_per_node.
Default is True.
"""
def __init__(self,
channel=LocalChannel(),
nodes_per_block=1,
cores_per_block=None,
cores_per_node=None,
init_blocks=1,
min_blocks=0,
max_blocks=1,
parallelism=1,
walltime="00:10:00",
scheduler_options='',
worker_init='',
project=None,
queue=None,
cmd_timeout=120,
move_files=True,
bsub_redirection=False,
request_by_nodes=True,
launcher=SingleNodeLauncher()):
label = 'LSF'
super().__init__(label,
channel,
nodes_per_block,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
cmd_timeout=cmd_timeout,
launcher=launcher)
self.project = project
self.queue = queue
self.cores_per_block = cores_per_block
self.cores_per_node = cores_per_node
self.move_files = move_files
self.bsub_redirection = bsub_redirection
self.request_by_nodes = request_by_nodes
# Update scheduler options
self.scheduler_options = scheduler_options + "\n"
if project:
self.scheduler_options += "#BSUB -P {}\n".format(project)
if queue:
self.scheduler_options += "#BSUB -q {}\n".format(queue)
if request_by_nodes:
self.scheduler_options += "#BSUB -nnodes {}\n".format(nodes_per_block)
else:
assert cores_per_block is not None and cores_per_node is not None, \
"Requesting resources by the number of cores. " \
"Need to specify cores_per_block and cores_per_node in the LSF provider."
self.scheduler_options += "#BSUB -n {}\n".format(cores_per_block)
self.scheduler_options += '#BSUB -R "span[ptile={}]"\n'.format(cores_per_node)
# Set nodes_per_block manually for Parsl strategy
assert cores_per_node != 0, "Need to specify a non-zero cores_per_node."
self.nodes_per_block = int(math.ceil(cores_per_block / cores_per_node))
self.worker_init = worker_init
def _status(self):
''' Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
'''
job_id_list = ','.join(self.resources.keys())
cmd = "bjobs {0}".format(job_id_list)
retcode, stdout, stderr = super().execute_wait(cmd)
        # execute_wait failed. Do not update.
if retcode != 0:
logger.debug("Updating job status from {} failed with return code {}".format(self.label,
retcode))
return
jobs_missing = list(self.resources.keys())
for line in stdout.split('\n'):
parts = line.split()
if parts and parts[0] != 'JOBID':
job_id = parts[0]
state = translate_table.get(parts[2], JobState.UNKNOWN)
self.resources[job_id]['status'] = JobStatus(state)
jobs_missing.remove(job_id)
        # bjobs does not report on jobs that are not running. So we are filling in the
        # blanks for missing jobs; we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)
def submit(self, command, tasks_per_node, job_name="parsl.lsf"):
"""Submit the command as an LSF job.
Parameters
----------
command : str
Command to be made on the remote side.
tasks_per_node : int
Command invocations to be launched per node
job_name : str
Name for the job (must be unique).
Returns
-------
None or str
If at capacity, returns None; otherwise, a string identifier for the job
"""
job_name = "{0}.{1}".format(job_name, time.time())
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))
job_config = {}
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["tasks_per_node"] = tasks_per_node
job_config["walltime"] = wtime_to_minutes(self.walltime)
job_config["scheduler_options"] = self.scheduler_options
job_config["worker_init"] = self.worker_init
job_config["user_script"] = command
# Wrap the command
job_config["user_script"] = self.launcher(command,
tasks_per_node,
self.nodes_per_block)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
if self.move_files:
logger.debug("moving files")
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
else:
logger.debug("not moving files")
channel_script_path = script_path
if self.bsub_redirection:
cmd = "bsub < {0}".format(channel_script_path)
else:
cmd = "bsub {0}".format(channel_script_path)
retcode, stdout, stderr = super().execute_wait(cmd)
job_id = None
if retcode == 0:
for line in stdout.split('\n'):
if line.lower().startswith("job") and "is submitted to" in line.lower():
job_id = line.split()[1].strip('<>')
self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
else:
logger.warning("Submission of command to scale_out failed")
logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
return job_id
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
job_id_list = ' '.join(job_ids)
retcode, stdout, stderr = super().execute_wait("bkill {0}".format(job_id_list))
rets = None
if retcode == 0:
for jid in job_ids:
                self.resources[jid]['status'] = JobStatus(translate_table['USUSP'])  # Job suspended by user/admin
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
@property
def status_polling_interval(self):
return 60
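# A minimal configuration sketch, assuming LSFProvider is used with parsl's
# HighThroughputExecutor. The queue and project names and the resource sizes
# below are placeholders, not parsl defaults.
if __name__ == '__main__':
    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    example_config = Config(
        executors=[
            HighThroughputExecutor(
                label='lsf_htex',
                provider=LSFProvider(
                    queue='normal',           # placeholder queue name
                    project='my_project',     # placeholder allocation/project
                    nodes_per_block=2,
                    walltime='01:00:00',
                    bsub_redirection=True,
                ),
            )
        ]
    )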
| Parsl/parsl | parsl/providers/lsf/lsf.py | Python | apache-2.0 | 10,602 |
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import directstream
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['movie4k.is']
self.base_link = 'http://movie4k.is'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s' % (data['title'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
if not cleantitle.get_simple(t2.replace('Watch Online','')) == cleantitle.get(title): raise Exception()
l = client.parseDOM(post, 'link')[0]
p = client.parseDOM(post, 'pubDate')[0]
if data['year'] in p: items += [(t, l)]
except:
pass
print items
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
u = client.request(item[1])
if 'http://www.imdb.com/title/%s/' % data['imdb'] in u:
l = client.parseDOM(u, 'div', {'class': 'movieplay'})[0]
l = client.parseDOM(u, 'iframe', ret='data-lazy-src')[0]
quality, info = source_utils.get_release_quality(name, l)
info = ' | '.join(info)
url = l
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
valid, host = source_utils.is_host_valid(url,hostDict)
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
'info': info, 'direct': False, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| TheWardoctor/Wardoctors-repo | script.module.fantastic/lib/resources/lib/sources/en/movie4kis.py | Python | apache-2.0 | 4,184 |
# All-Terrain-Life-Vest
# All Terrain Life Vest - IEA Raspberry Pi Competition Entry
# Description
import RPi.GPIO as GPIO
import time
import os
GPIO.setmode (GPIO.BCM)
GPIO.cleanup()
GPIO.setwarnings(False)
GPIO.setup(17,GPIO.OUT)
GPIO.setup(04,GPIO.OUT)
GPIO.setup(22, GPIO.IN)
print("---------------")
print("Button+GPIO")
print("---------------")
print GPIO.input(22)
while True:
if(GPIO.input(22)==False):
GPIO.output(17,GPIO.HIGH)
GPIO.output(04,GPIO.HIGH)
print("air bag activated")
os.system('date')
print GPIO.input(22)
time.sleep(1)
GPIO.output(17,GPIO.LOW)
GPIO.output(04,GPIO.LOW)
else:
os.system('clear')
print("air bag NOT activated")
time.sleep(1)
| 8acs2016/All-Terrain-Life-Vest | code.py | Python | apache-2.0 | 762 |
import urllib
from flask import Flask, Response, abort, request, send_file
from flask_restful import Resource, Api
from flask_cors import CORS, cross_origin
import datetime
import json
import vacker.file_factory
app = Flask(__name__)
api = Api(app)
file_factory = vacker.file_factory.FileFactory()
CORS(app, resources={"*": {"origins": "*"}})
class Stats(Resource):
def get(self, query_string):
pass
class Search(Resource):
def get(self):
res = file_factory.query_files(
query_string=request.args.get('q', ''),
start=request.args.get('start', 0),
limit=request.args.get('limit', 10),
sort=request.args.get('sort_field', None),
sort_dir=('asc' if request.args.get('sort_order', '1') == '1' else 'desc'))
for file_ in res['files']:
file_['blob_url'] = '/blob/' + urllib.parse.quote(file_['id'])
return {
'data': [file_ for file_ in res['files']],
'recordsTotal': res['total_results'],
'recordsFiltered': res['total_results']
}
class Blob(Resource):
def get(self, file_id):
file_ = file_factory.get_file_by_id(file_id)
parent = file_.get('a_parent_archive')
if parent:
return send_file(parent)
return send_file(file_.get_path())
# API resources
api.add_resource(Stats, '/stats/<string:query_string>')
api.add_resource(Search, '/search')
#api.add_resource(Blob, '/blob/<string:file_id>')
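# A minimal run sketch, assuming the app is served directly with Flask's
# built-in development server; the original project may use a different
# entry point (e.g. a WSGI server), and the host/port below are placeholders.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)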
| MatthewJohn/vacker | vacker/server.py | Python | apache-2.0 | 1,498 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the RetinaNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from absl import logging
import tensorflow.compat.v2 as tf
from tensorflow.python.keras import backend
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
"""RetinaNet model function."""
def __init__(self, params):
super(RetinanetModel, self).__init__(params)
# For eval metrics.
self._params = params
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._head_fn = factory.retinanet_head_generator(params.retinanet_head)
# Loss function.
self._cls_loss_fn = losses.RetinanetClassLoss(params.retinanet_loss)
self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)
self._box_loss_weight = params.retinanet_loss.box_loss_weight
self._keras_model = None
# Predict function.
self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(
params.postprocess)
self._transpose_input = params.train.transpose_input
    assert not self._transpose_input, 'Transpose input is not supported.'
# Input layer.
input_shape = (
params.retinanet_parser.output_size +
[params.retinanet_parser.num_channels])
self._input_layer = tf.keras.layers.Input(
shape=input_shape, name='',
dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)
def build_outputs(self, inputs, mode):
# If the input image is transposed (from NHWC to HWCN), we need to revert it
# back to the original shape before it's used in the computation.
if self._transpose_input:
inputs = tf.transpose(inputs, [3, 0, 1, 2])
backbone_features = self._backbone_fn(
inputs, is_training=(mode == mode_keys.TRAIN))
fpn_features = self._fpn_fn(
backbone_features, is_training=(mode == mode_keys.TRAIN))
cls_outputs, box_outputs = self._head_fn(
fpn_features, is_training=(mode == mode_keys.TRAIN))
if self._use_bfloat16:
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
model_outputs = {
'cls_outputs': cls_outputs,
'box_outputs': box_outputs,
}
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
cls_loss = self._cls_loss_fn(outputs['cls_outputs'],
labels['cls_targets'],
labels['num_positives'])
box_loss = self._box_loss_fn(outputs['box_outputs'],
labels['box_targets'],
labels['num_positives'])
model_loss = cls_loss + self._box_loss_weight * box_loss
l2_regularization_loss = self.weight_decay_loss(self._l2_weight_decay,
trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
}
return _total_loss_fn
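  # In symbols, the loss assembled by build_loss_fn above is
  #   total_loss = cls_loss + box_loss_weight * box_loss + l2_regularization_loss
  # where box_loss_weight comes from params.retinanet_loss.box_loss_weight and
  # the l2 term is computed over the filtered trainable variables.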
def build_model(self, params, mode=None):
if self._keras_model is None:
with backend.get_graph().as_default():
outputs = self.model_outputs(self._input_layer, mode)
model = tf.keras.models.Model(
inputs=self._input_layer, outputs=outputs, name='retinanet')
assert model is not None, 'Fail to build tf.keras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
def post_processing(self, labels, outputs):
# TODO(yeqing): Moves the output related part into build_outputs.
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
        raise ValueError('"%s" is missing in outputs, required %s found %s' %
                         (field, required_output_fields, outputs.keys()))
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
        raise ValueError('"%s" is missing in labels, required %s found %s' %
                         (field, required_label_fields, labels.keys()))
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'],
labels['anchor_boxes'], labels['image_info'][:, 1:2, :])
# Discards the old output tensors to save memory. The `cls_outputs` and
    # `box_outputs` are pretty big and could potentially lead to memory issues.
outputs = {
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if 'groundtruths' in labels:
labels['source_id'] = labels['groundtruths']['source_id']
labels['boxes'] = labels['groundtruths']['boxes']
labels['classes'] = labels['groundtruths']['classes']
labels['areas'] = labels['groundtruths']['areas']
labels['is_crowds'] = labels['groundtruths']['is_crowds']
return labels, outputs
def eval_metrics(self):
return eval_factory.evaluator_generator(self._params.eval)
| alexgorban/models | official/vision/detection/modeling/retinanet_model.py | Python | apache-2.0 | 6,957 |
import logging
from boto3.resources.factory import ResourceFactory
from boto3.resources.model import ResourceModel
from boto3.resources.base import ResourceMeta
from boto3.docs import docstring
from boto3.exceptions import ResourceLoadException
from boto3.resources.factory import build_identifiers
from functools import partial
from aioboto3.resources.collection import AIOCollectionFactory
from aioboto3.resources.action import AIOServiceAction, AIOWaiterAction
from aioboto3.resources.base import AIOBoto3ServiceResource
logger = logging.getLogger(__name__)
class AIOBoto3ResourceFactory(ResourceFactory):
# noinspection PyMissingConstructor
def __init__(self, emitter):
self._collection_factory = AIOCollectionFactory()
self._emitter = emitter
async def load_from_definition(self, resource_name,
single_resource_json_definition, service_context):
logger.debug('Loading %s:%s', service_context.service_name,
resource_name)
# Using the loaded JSON create a ResourceModel object.
resource_model = ResourceModel(
resource_name, single_resource_json_definition,
service_context.resource_json_definitions
)
# Do some renaming of the shape if there was a naming collision
# that needed to be accounted for.
shape = None
if resource_model.shape:
shape = service_context.service_model.shape_for(
resource_model.shape)
resource_model.load_rename_map(shape)
# Set some basic info
meta = ResourceMeta(
service_context.service_name, resource_model=resource_model)
attrs = {
'meta': meta,
}
# Create and load all of attributes of the resource class based
# on the models.
# Identifiers
self._load_identifiers(
attrs=attrs, meta=meta, resource_name=resource_name,
resource_model=resource_model
)
# Load/Reload actions
self._load_actions(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
)
# Attributes that get auto-loaded
self._load_attributes(
attrs=attrs, meta=meta, resource_name=resource_name,
resource_model=resource_model,
service_context=service_context)
# Collections and their corresponding methods
self._load_collections(
attrs=attrs, resource_model=resource_model,
service_context=service_context)
# References and Subresources
self._load_has_relations(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
)
# Waiter resource actions
self._load_waiters(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
)
# Create the name based on the requested service and resource
cls_name = resource_name
if service_context.service_name == resource_name:
cls_name = 'ServiceResource'
cls_name = service_context.service_name + '.' + cls_name
base_classes = [AIOBoto3ServiceResource]
if self._emitter is not None:
await self._emitter.emit(
'creating-resource-class.%s' % cls_name,
class_attributes=attrs, base_classes=base_classes,
service_context=service_context)
return type(str(cls_name), tuple(base_classes), attrs)
def _create_autoload_property(factory_self, resource_name, name,
snake_cased, member_model, service_context):
"""
Creates a new property on the resource to lazy-load its value
via the resource's ``load`` method (if it exists).
"""
# The property loader will check to see if this resource has already
# been loaded and return the cached value if possible. If not, then
# it first checks to see if it CAN be loaded (raise if not), then
# calls the load before returning the value.
async def property_loader(self):
if self.meta.data is None:
if hasattr(self, 'load'):
await self.load()
else:
raise ResourceLoadException(
'{0} has no load method'.format(
self.__class__.__name__))
return self.meta.data.get(name)
property_loader.__name__ = str(snake_cased)
property_loader.__doc__ = docstring.AttributeDocstring(
service_name=service_context.service_name,
resource_name=resource_name,
attr_name=snake_cased,
event_emitter=factory_self._emitter,
attr_model=member_model,
include_signature=False
)
return property(property_loader)
def _create_waiter(factory_self, resource_waiter_model, resource_name,
service_context):
"""
Creates a new wait method for each resource where both a waiter and
resource model is defined.
"""
waiter = AIOWaiterAction(resource_waiter_model,
waiter_resource_name=resource_waiter_model.name)
async def do_waiter(self, *args, **kwargs):
await waiter(self, *args, **kwargs)
do_waiter.__name__ = str(resource_waiter_model.name)
do_waiter.__doc__ = docstring.ResourceWaiterDocstring(
resource_name=resource_name,
event_emitter=factory_self._emitter,
service_model=service_context.service_model,
resource_waiter_model=resource_waiter_model,
service_waiter_model=service_context.service_waiter_model,
include_signature=False
)
return do_waiter
def _create_class_partial(factory_self, subresource_model, resource_name,
service_context):
"""
Creates a new method which acts as a functools.partial, passing
along the instance's low-level `client` to the new resource
class' constructor.
"""
name = subresource_model.resource.type
async def create_resource(self, *args, **kwargs):
# We need a new method here because we want access to the
# instance's client.
positional_args = []
# We lazy-load the class to handle circular references.
json_def = service_context.resource_json_definitions.get(name, {})
resource_cls = await factory_self.load_from_definition(
resource_name=name,
single_resource_json_definition=json_def,
service_context=service_context
)
# Assumes that identifiers are in order, which lets you do
# e.g. ``sqs.Queue('foo').Message('bar')`` to create a new message
# linked with the ``foo`` queue and which has a ``bar`` receipt
# handle. If we did kwargs here then future positional arguments
# would lead to failure.
identifiers = subresource_model.resource.identifiers
if identifiers is not None:
for identifier, value in build_identifiers(identifiers, self):
positional_args.append(value)
return partial(resource_cls, *positional_args,
client=self.meta.client)(*args, **kwargs)
create_resource.__name__ = str(name)
create_resource.__doc__ = docstring.SubResourceDocstring(
resource_name=resource_name,
sub_resource_model=subresource_model,
service_model=service_context.service_model,
include_signature=False
)
return create_resource
def _create_action(factory_self, action_model, resource_name,
service_context, is_load=False):
"""
Creates a new method which makes a request to the underlying
AWS service.
"""
        # Create the action in this closure but before the ``do_action``
# method below is invoked, which allows instances of the resource
# to share the ServiceAction instance.
action = AIOServiceAction(
action_model, factory=factory_self,
service_context=service_context
)
# A resource's ``load`` method is special because it sets
# values on the resource instead of returning the response.
if is_load:
# We need a new method here because we want access to the
# instance via ``self``.
async def do_action(self, *args, **kwargs):
response = await action(self, *args, **kwargs)
self.meta.data = response
            # Create the docstring for the load/reload methods.
lazy_docstring = docstring.LoadReloadDocstring(
action_name=action_model.name,
resource_name=resource_name,
event_emitter=factory_self._emitter,
load_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
else:
# We need a new method here because we want access to the
# instance via ``self``.
async def do_action(self, *args, **kwargs):
response = await action(self, *args, **kwargs)
if hasattr(self, 'load'):
# Clear cached data. It will be reloaded the next
# time that an attribute is accessed.
# TODO: Make this configurable in the future?
self.meta.data = None
return response
lazy_docstring = docstring.ActionDocstring(
resource_name=resource_name,
event_emitter=factory_self._emitter,
action_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
do_action.__name__ = str(action_model.name)
do_action.__doc__ = lazy_docstring
return do_action
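# A minimal usage sketch, assuming the Session-based entry point of recent
# aioboto3 releases (the bucket name is a placeholder): resources built by
# this factory are async context managers, and sub-resources and lazy-loaded
# attributes must be awaited.
if __name__ == '__main__':
    import asyncio
    import aioboto3
    async def _demo():
        session = aioboto3.Session()
        async with session.resource('s3') as s3:
            bucket = await s3.Bucket('example-bucket')  # placeholder name
            async for obj in bucket.objects.all():
                print(obj.key)
    asyncio.run(_demo())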
| terrycain/aioboto3 | aioboto3/resources/factory.py | Python | apache-2.0 | 10,415 |
from PyCA.Core import *
import PyCA.Common as common
import numpy as np
import matplotlib.pyplot as plt
def SplatSafe(outMass, g, mass):
mType = mass.memType()
if mType == MEM_DEVICE:
minmaxl = MinMax(mass)
maxval = max([abs(x) for x in minmaxl])
if maxval > 2000.00:
# print 'Warning, range too big for splatting. Range: ',minmaxl
# print 'Temporary downscaling values for splatting. Will scale it back after splatting.'
scalefactor = float(100.00)/maxval
MulC_I(mass, scalefactor)
Splat(outMass, g, mass, False)
MulC_I(mass, 1.0/scalefactor)
MulC_I(outMass, 1.0/scalefactor)
else:
Splat(outMass, g, mass, False)
else:
Splat(outMass,g, mass, False)
# end SplatSafe
def ComputeVhat(out_v, scratchV, I, Ihat, m, mhat, diffOp):
    '''
    Compute the adjoint velocity vhat = K(Ihat * grad(I) - ad^*_{mhat} m),
    i.e. smooth (via the inverse of the differential operator) the image
    forcing term minus the coadjoint term. The result is written to out_v;
    scratchV is used as temporary storage.
    '''
Gradient(out_v, I)
MulMulC_I(out_v, Ihat, 1.0)
CoAdInf(scratchV, mhat ,m)
Sub_I(out_v, scratchV)
diffOp.applyInverseOperator(out_v)
return out_v
# end ComputeVhat
def EvaluateRHSFwd(out_v, v, phi):
'''
Evaluate RHS for forward integration of \frac{d\phi}{dt}=v\circ\phi
'''
ApplyH(out_v, v, phi, BACKGROUND_STRATEGY_PARTIAL_ZERO)
return out_v
#end EvaluateRHSFwd
def EvaluateRHSBwd(out_v, scratchV, v, I, m, mhat, Ihat, g, diffOp):
    '''
    Evaluate the RHS for backward (adjoint) integration: compute vhat (see
    ComputeVhat), add the Jacobian term (Dv) mhat, and deform the sum by the
    map g. The result is written to out_v.
    '''
#first compute vhat_t
ComputeVhat(out_v, scratchV, I, Ihat, m, mhat, diffOp)
#compute Dvmhat
JacobianXY(scratchV, v, mhat)
#Add both
Add_I(scratchV, out_v)
#deform
ApplyH(out_v, scratchV, g, BACKGROUND_STRATEGY_PARTIAL_ZERO);
return out_v
#end EvaluateRHSBwd
def IntegrateGeodesic(m0,t,diffOp,\
m,g,ginv,\
scratchV1,scratchV2,scratchV3,\
keepstates=None,keepinds=None,\
Ninv=5,integMethod='EULER', startIndex=0, endIndex=0, initializePhi=True,\
RK4=None, scratchG=None):
'''
    Integrate the geodesic (shooting) equations forward in time. The resulting
    g and ginv are the diffeomorphism and its inverse at the end point of the
    shooting, and keepstates is populated with (g, ginv) at the timepoints
    requested in keepinds. m is NOT the momentum at the end point; call
    CoAd(m, ginv, m0) after this function returns to obtain it.
    If startIndex is anything other than 0, g and ginv are assumed to be
    appropriately initialized.
'''
# initial conditions
if endIndex == 0:
endIndex=len(t)-1
if startIndex == 0:
if initializePhi==True: # if t=0 is not the identity diffeomorphisms
SetToIdentity(g)
SetToIdentity(ginv)
Copy(m,m0)
else:
CoAd(m,ginv,m0)
else:
CoAd(m,ginv,m0) # assumes ginv is initialized when the function was called
# end if
# Smooth to get velocity at time t
diffOp.applyInverseOperator(m)
if (keepinds!=None) & (keepstates!=None):
if (0 in keepinds) & (startIndex == 0):
# remember to copy everything
indx_of_cur_tp = keepinds.index(0)
Copy(keepstates[indx_of_cur_tp][0],g)
Copy(keepstates[indx_of_cur_tp][1],ginv)
# end if
# end if
# do integration
for i in range(startIndex+1,endIndex+1,1):
#sys.stdout.write(',')
dt = t[i]-t[i-1] # time step
if integMethod == "EULER":
# print 'Running Euler integration for shooting'
# Compute forward step, w for g
EvaluateRHSFwd(scratchV1, m, g)
MulC_I(scratchV1, dt)
# Take the fwd step
Add_I(g,scratchV1)
# Update ginv corresponding to this fwd step
UpdateInverse(scratchV3, scratchV2, ginv, scratchV1, Ninv)
Copy(ginv,scratchV3)
elif integMethod == "RK4":
# RK4 integration two extra fields
if scratchG is None:
print 'scratchG variable is initialized in geodesic shooting'
scratchG = Field3D(m0.grid(),m0.memType())
if RK4 is None:
print 'RK4 variable is initialized in geodesic shooting'
RK4 = Field3D(m0.grid(),m0.memType())
EvaluateRHSFwd(scratchV1, m, g); MulC_I(scratchV1, dt) # k1 computed
Copy(RK4,scratchV1)
# for k2
Copy(scratchG, g); MulC_I(scratchV1,0.5); Add_I(scratchG,scratchV1); UpdateInverse(scratchV3, scratchV2, ginv, scratchV1, Ninv); CoAd(m,scratchV3,m0); diffOp.applyInverseOperator(m);
EvaluateRHSFwd(scratchV1, m, scratchG); MulC_I(scratchV1, dt) # k2 computed
Add_MulC_I(RK4,scratchV1,2.0)
# for k3
Copy(scratchG, g); MulC_I(scratchV1,0.5); Add_I(scratchG,scratchV1); UpdateInverse(scratchV3, scratchV2, ginv, scratchV1, Ninv); CoAd(m,scratchV3,m0); diffOp.applyInverseOperator(m);
EvaluateRHSFwd(scratchV1, m, scratchG); MulC_I(scratchV1, dt) # k3 computed
Add_MulC_I(RK4,scratchV1,2.0)
# for k4
Copy(scratchG, g); Add_I(scratchG,scratchV1); UpdateInverse(scratchV3, scratchV2, ginv, scratchV1, Ninv); CoAd(m,scratchV3,m0); diffOp.applyInverseOperator(m);
EvaluateRHSFwd(scratchV1, m, scratchG); MulC_I(scratchV1, dt) # k4 computed
Add_I(RK4,scratchV1)
# final update
MulC_I(RK4,1.0/float(6.0))
Add_I(g,RK4)
UpdateInverse(scratchV3, scratchV2, ginv, RK4, Ninv)
Copy(ginv,scratchV3)
else:
raise Exception('Unknown integration method: '+integMethod)
# end if
# common lines of code executed regardless of the integration scheme
# check whether we should store this state
if (keepinds!=None) & (keepstates!=None):
if i in keepinds:
# remember to copy everything
indx_of_cur_tp = keepinds.index(i)
Copy(keepstates[indx_of_cur_tp][0],g)
Copy(keepstates[indx_of_cur_tp][1],ginv)
# end if
# end if
# skip if it is the last iteration.
if i<endIndex:
# Coadjoint action gives us momentum at time t
CoAd(m,ginv,m0)
# Smooth to get velocity at time t
diffOp.applyInverseOperator(m)
# end if
# end for
# end IntegrateGeodesic
def IntegrateGeodesicBwdIteration(t,i,m0,g1,ginv1,m1,bwdG,bwdGinv,
gprev,ginvprev,
m1initialized,prev_was_checkpoint,
diffOp,
m,
scratchV1, scratchV2,scratchV3,
Ninv=5,integMethod='EULER',
RK4=None, scratchG=None):
if m1 is None:
print 'm1 variable is initialized in IntegrateGeodesicBwdIteration'
m1 = Field3D(m0.grid(),m0.memType())
if bwdG is None:
print 'bwdG variable is initialized in IntegrateGeodesicBwdIteration'
bwdG = Field3D(m0.grid(),m0.memType())
if bwdGinv is None:
print 'bwdGinv variable is initialized in IntegrateGeodesicBwdIteration'
bwdGinv = Field3D(m0.grid(),m0.memType())
if ( m1initialized == False ):
SetToIdentity(bwdG)
SetToIdentity(bwdGinv)
CoAd(m1,ginv1,m0)
m1initialized = True
# end if
# If previous iteration had a checkpoint bwdG, bwdGinv would have been updated. If not need an updated ones
if (prev_was_checkpoint == True) & (i != (len(t)-2)):
ComposeHH(bwdG,gprev,ginv1)
ComposeHH(bwdGinv,g1,ginvprev)
# end if
IntegrateGeodesic(m1,[0,t[i]-t[i+1]],diffOp,\
m,bwdG,bwdGinv,\
scratchV1,scratchV2,scratchV3,\
keepstates=None,keepinds=None,\
Ninv=Ninv,integMethod=integMethod,initializePhi=False,RK4=RK4,scratchG=scratchG)
ComposeHH(gprev,bwdG,g1)
ComposeHH(ginvprev, ginv1,bwdGinv)
prev_was_checkpoint = False
# end IntegrateGeodesicBwd
def IntegrateAdjoints(Iadj,madj,
I,m,Iadjtmp, madjtmp,v,
scratchV1,scratchV2,
I0,m0,
t, checkpointstates, checkpointinds,
IGradAtMsmts, msmtInds,
diffOp,
integMethod='EULER',Ninv=5,
scratchV3=None, scratchV4=None,scratchV5=None,scratchV6=None, scratchV7=None, # used when all timepoints are not checkpointed or with RK4
scratchV8=None, scratchV9=None, # used with RK4 only when all are not checkpointed
RK4 = None, scratchG = None,scratchGinv = None, # used only with RK4
scratchI = None
):
    '''
    Integrate the adjoint variables Iadj and madj backward in time along the
    geodesic, using the checkpointed (g, ginv) states in checkpointstates /
    checkpointinds and re-integrating the geodesic backward between
    checkpoints when intermediate states were not stored. Image gradients at
    the measurement timepoints (IGradAtMsmts, indexed by msmtInds) drive the
    adjoint system.
    '''
if len(t)-1 not in checkpointinds:
raise Exception('Endpoint must be one of the checkpoints passed to IntegrateAdjoints')
else:
indx_of_last_tp = checkpointinds.index(len(t)-1)
# extra reference names used
m1 = None
bwdG = None
bwdGinv = None
SetMem(madj,0.0)
SetMem(Iadj,0.0)
(g, ginv) = checkpointstates[indx_of_last_tp]
#I(t) and m(t) at end point
CoAd(m,ginv,m0)
ApplyH(I,I0,ginv)
Copy(v,m)
diffOp.applyInverseOperator(v) # has v(t)
SetMem(madjtmp,0.0) # will be used for hat version of madj
SetMem(Iadjtmp,0.0) # will be used for hat version of Iadj
# initial conditions
for k in range(len(msmtInds)):
if checkpointinds[msmtInds[k]] == (len(t)-1):
# there is a measurement at the last time point which will always be the case for matching but not necessarily regression
MulC(Iadjtmp,IGradAtMsmts[k],-1)
#Splat(Iadj,ginv, Iadjtmp,False) #Also, ginv = checkpointstates[msmtInds[k]][1]
SplatSafe(Iadj, ginv, Iadjtmp)
# end if
# end for
prev_was_checkpoint = True
m1initialized = False
for i in range(len(t)-2,-1,-1):
dt = t[i] - t[i+1]
if integMethod == "EULER":
# print 'Running Euler integration for adjoint integration'
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, g, diffOp)
# Euler step for madj
Add_MulC_I(madj, scratchV1, dt)
if i in checkpointinds:
indx_of_cur_tp = checkpointinds.index(i)
(g, ginv) = checkpointstates[indx_of_cur_tp]
prev_was_checkpoint = True
elif i > 0: # i=0 need not be calculated
# compute g and ginv by backward integrating geodesic
# oops we are going to need a lot of scratch variables
if scratchV6 is None:
print 'scratchV6 variable is initialized in IntegrateAdjoints'
scratchV6 = Field3D(I0.grid(),I0.memType())
if scratchV7 is None:
print 'scratchV7 variable is initialized in IntegrateAdjoints'
scratchV7 = Field3D(I0.grid(),I0.memType())
if (prev_was_checkpoint == True): # so that we do not update checkpointed states
Copy(scratchV6,g)
Copy(scratchV7,ginv)
# update reference
g=scratchV6
ginv=scratchV7
# madjtmp and v are used as scratch variables in below call
m1 = scratchV3; bwdG = scratchV4; bwdGinv = scratchV5; # update references for ease of reading
IntegrateGeodesicBwdIteration(t,i,m0, checkpointstates[indx_of_last_tp][0], checkpointstates[indx_of_last_tp][1],m1, bwdG, bwdGinv,
g,ginv,
m1initialized,prev_was_checkpoint,
diffOp,
madjtmp,
scratchV1,scratchV2,v,
Ninv=Ninv,integMethod=integMethod)
# end if
# if there is a measurement at this time point (for regression)
for k in range(len(msmtInds)):
if i>0:
if checkpointinds[msmtInds[k]] == i:
# I is used as scratch variable
#Splat(I, ginv, IGradAtMsmts[k],False)
SplatSafe(I, ginv, IGradAtMsmts[k])
Sub_I(Iadj,I)
# end if
                elif msmtInds[k]==-1:# if there is a measurement at time t=0 it won't be checkpointed, but it is HARDCODED to have msmtInds == -1. Note this is checked only for t==0
Sub_I(Iadj, IGradAtMsmts[k])
# end if
# end for
# update variables for next iteration
if i > 0: # last iteration skipped
CoAd(m,ginv,m0)
ApplyH(I,I0,ginv)
Copy(v,m)
diffOp.applyInverseOperator(v) # has v(t)
ApplyH(madjtmp,madj,ginv,BACKGROUND_STRATEGY_PARTIAL_ZERO) # hat version of madj
#Splat(Iadjtmp, g, Iadj,False) # hat version of Iadj
SplatSafe(Iadjtmp, g, Iadj) # hat version of Iadj
# end if
elif integMethod == "RK4":
if RK4 is None:
print 'RK4 variable is initialized'
RK4 = Field3D(I0.grid(),I0.memType())
if scratchG is None:
print 'scratchG variable is initialized'
scratchG = Field3D(I0.grid(),I0.memType())
if scratchGinv is None:
print 'scratchGinv variable is initialized'
scratchGinv = Field3D(I0.grid(),I0.memType())
if scratchI is None:
print 'scratchI variable is initialized'
scratchI = Image3D(I0.grid(),I0.memType())
# first get g and ginv for current timepoint
if (i in checkpointinds) or (i==0): # assuming g and g inv points to prev
# just assign the references
if i>0:
indx_of_cur_tp = checkpointinds.index(i)
(gcur, ginvcur) = checkpointstates[indx_of_cur_tp]
# end if note if i==0, gcur and ginvcur are treated as identity
prev_was_checkpoint = True
# begin rk4 integration for adjoint
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, g, diffOp); MulC_I(scratchV1, dt) # k1 computed
Copy(RK4,scratchV1)
# compute and store phi0t, phit0, v_t, i_t, m_t and i_hat at t+1/2 for computing k2 and k3
if i>0:
SubMulC(scratchG,g,gcur,0.5); #scratchG has w
UpdateInverse(scratchGinv, scratchV2, ginvcur, scratchG, Ninv)
Add_I(scratchG,gcur)
else:# add 0.5 times identity
HtoV(scratchG,g);MulC_I(scratchG,0.5) #scratchG has w
# iteratively update inverse as, g_{1,0} = Id - w\circ g_{1,0}
SetToIdentity(scratchGinv)
for k in range(Ninv):
ApplyH(scratchV2, scratchG, scratchGinv, BACKGROUND_STRATEGY_PARTIAL_ZERO)
HtoV(scratchGinv,scratchV2); MulC_I(scratchGinv,-1.0)
VtoH_I(scratchG)
# end if
CoAd(m,scratchGinv,m0); Copy(v,m); diffOp.applyInverseOperator(v); ApplyH(I,I0,scratchGinv)
#Splat(Iadjtmp, scratchG, Iadj,False)
SplatSafe(Iadjtmp, scratchG, Iadj)
# for k2
# mhat_t at t+1/2 for k2
Add_MulC(scratchV2,madj,scratchV1,0.5) # mtilde at t+1/2
ApplyH(madjtmp,scratchV2,scratchGinv, BACKGROUND_STRATEGY_PARTIAL_ZERO); #mhat at t+1/2
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, scratchG, diffOp); MulC_I(scratchV1, dt) # k2 computed
Add_MulC_I(RK4, scratchV1, 2.0)
# for k3
# mhat_t at t+1/2 for k3
Add_MulC(scratchV2,madj,scratchV1,0.5) # mtilde at t+1/2
ApplyH(madjtmp,scratchV2,scratchGinv, BACKGROUND_STRATEGY_PARTIAL_ZERO); #mhat at t+1/2
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, scratchG, diffOp); MulC_I(scratchV1, dt) # k3 computed
Add_MulC_I(RK4, scratchV1, 2.0)
# compute v_t, i_t, m_t, i_hat at t for computing k4
if i>0:
CoAd(m,ginvcur,m0)
#Splat(Iadjtmp, gcur, Iadj,False)
SplatSafe(Iadjtmp, gcur, Iadj)
ApplyH(I,I0,ginvcur)
else:
Copy(m,m0)
Copy(Iadjtmp,Iadj)
Copy(I,I0)
Copy(v,m); diffOp.applyInverseOperator(v);
# for k4
# mhat_t at t for k4
Add(scratchV2,madj,scratchV1) # mtilde at t
if i>0:
ApplyH(madjtmp,scratchV2,ginvcur, BACKGROUND_STRATEGY_PARTIAL_ZERO);
else:
Copy(madjtmp,scratchV2)
# end if #mhat at t
if i>0:
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, gcur, diffOp); MulC_I(scratchV1, dt) # k4 computed
else:
SetToIdentity(scratchG)
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, scratchG, diffOp); MulC_I(scratchV1, dt) # k4 computed
Add_I(RK4, scratchV1)
# final update
MulC_I(RK4,1.0/float(6.0))
Add_I(madj,RK4)
#FOR NEXT ITERATION:
# compute mhat_t, ihat_t at t to use in k1 computation. Note v_t, i_t and m_t are still stored from this iteration.
# if there is a measurement at this time point (for regression)
for k in range(len(msmtInds)):
if i>0:
if checkpointinds[msmtInds[k]] == i:
#Splat(scratchV1, ginvcur, IGradAtMsmts[k],False)
SplatSafe(scratchI, ginvcur, IGradAtMsmts[k])
Sub_I(Iadj,scratchI)
#Splat(Iadjtmp, gcur, Iadj,False) # hat version of Iadj
SplatSafe(Iadjtmp, gcur, Iadj) # hat version of Iadj
# end if
                    elif msmtInds[k]==-1: # if there is a measurement at time t=0 it won't be checkpointed, but it is HARDCODED to have msmtInds == -1. Note this is checked only for t==0
Sub_I(Iadj, IGradAtMsmts[k])
# end if
# end for
if i > 0: # last iteration skipped
ApplyH(madjtmp,madj,ginvcur, BACKGROUND_STRATEGY_PARTIAL_ZERO) # hat version of madj
# assign g, ginv appropriately for next iteration
g = gcur
ginv = ginvcur
# end if
else:
# raise Exception('RK4 integration without all checkpoints not yet implemented')
# compute gcur and ginvcur by backward integrating geodesic
if scratchV6 is None:
print 'scratchV6 variable is initialized in IntegrateAdjoints'
scratchV6 = Field3D(I0.grid(),I0.memType())
if scratchV7 is None:
print 'scratchV7 variable is initialized in IntegrateAdjoints'
scratchV7 = Field3D(I0.grid(),I0.memType())
if scratchV8 is None:
print 'scratchV8 variable is initialized in IntegrateAdjoints'
scratchV8 = Field3D(I0.grid(),I0.memType())
if scratchV9 is None:
print 'scratchV9 variable is initialized in IntegrateAdjoints'
scratchV9 = Field3D(I0.grid(),I0.memType())
# initialize with previous
if prev_was_checkpoint == True:
gcur=scratchV8
ginvcur=scratchV9
Copy(gcur,g)
Copy(ginvcur,ginv)
# endif --previous was not checkpoint scratchV6 and scratch V8 should both have g and scratchV7 and scratch V9 should both have ginv. so no need to copy
# scratchG, scratchGinv and v are used as scratch variables in below call
m1 = scratchV3; bwdG = scratchV4; bwdGinv = scratchV5; # update references for ease of reading
IntegrateGeodesicBwdIteration(t,i,m0, checkpointstates[indx_of_last_tp][0], checkpointstates[indx_of_last_tp][1],m1, bwdG, bwdGinv,
gcur,ginvcur,
m1initialized,prev_was_checkpoint,
diffOp,
scratchGinv,
scratchV1,scratchV2,v,
Ninv=Ninv,integMethod=integMethod,
RK4=RK4,scratchG=scratchG)
# begin rk4 integration for adjoint
Copy(v,m); diffOp.applyInverseOperator(v);
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, g, diffOp); MulC_I(scratchV1, dt) # k1 computed
Copy(RK4,scratchV1)
# compute and store phi0t, phit0, v_t, i_t, m_t and i_hat at t+1/2 for computing k2 and k3
SubMulC(scratchG,g,gcur,0.5); #scratchG has w
UpdateInverse(scratchGinv, scratchV2, ginvcur, scratchG, Ninv)
Add_I(scratchG,gcur)
CoAd(m,scratchGinv,m0); Copy(v,m); diffOp.applyInverseOperator(v); ApplyH(I,I0,scratchGinv)
#Splat(Iadjtmp, scratchG, Iadj,False)
SplatSafe(Iadjtmp, scratchG, Iadj)
# for k2
# mhat_t at t+1/2 for k2
Add_MulC(scratchV2,madj,scratchV1,0.5) # mtilde at t+1/2
ApplyH(madjtmp,scratchV2,scratchGinv, BACKGROUND_STRATEGY_PARTIAL_ZERO); #mhat at t+1/2
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, scratchG, diffOp); MulC_I(scratchV1, dt) # k2 computed
Add_MulC_I(RK4, scratchV1, 2.0)
# for k3
# mhat_t at t+1/2 for k3
Add_MulC(scratchV2,madj,scratchV1,0.5) # mtilde at t+1/2
ApplyH(madjtmp,scratchV2,scratchGinv, BACKGROUND_STRATEGY_PARTIAL_ZERO); #mhat at t+1/2
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, scratchG, diffOp); MulC_I(scratchV1, dt) # k3 computed
Add_MulC_I(RK4, scratchV1, 2.0)
# compute v_t, i_t, m_t, i_hat at t for computing k4
CoAd(m,ginvcur,m0)
#Splat(Iadjtmp, gcur, Iadj,False)
SplatSafe(Iadjtmp, gcur, Iadj)
ApplyH(I,I0,ginvcur)
Copy(v,m); diffOp.applyInverseOperator(v);
# for k4
# mhat_t at t for k4
Add(scratchV2,madj,scratchV1) # mtilde at t
ApplyH(madjtmp,scratchV2,ginvcur, BACKGROUND_STRATEGY_PARTIAL_ZERO);
EvaluateRHSBwd(scratchV1, scratchV2, v, I, m, madjtmp, Iadjtmp, gcur, diffOp); MulC_I(scratchV1, dt) # k4 computed
Add_I(RK4, scratchV1)
# final update
MulC_I(RK4,1.0/float(6.0))
Add_I(madj,RK4)
#FOR NEXT ITERATION:
# compute mhat_t, ihat_t at t to use in k1 computation. Note v_t, i_t and m_t are still stored from this iteration.
# if there is a measurement at this time point (for regression)
for k in range(len(msmtInds)):
if i>0:
if checkpointinds[msmtInds[k]] == i:
#Splat(scratchV1, ginvcur, IGradAtMsmts[k],False)
SplatSafe(scratchI, ginvcur, IGradAtMsmts[k])
Sub_I(Iadj,scratchI)
#Splat(Iadjtmp, gcur, Iadj,False) # hat version of Iadj
SplatSafe(Iadjtmp, gcur, Iadj) # hat version of Iadj
# end if
                elif msmtInds[k]==-1: # a measurement at time t=0 is not checkpointed; it is hardcoded to have msmtInds == -1. Note this is checked only for t==0
Sub_I(Iadj,IGradAtMsmts[k])
# end if
# end for
ApplyH(madjtmp,madj,ginvcur, BACKGROUND_STRATEGY_PARTIAL_ZERO) # hat version of madj
# assign g, ginv appropriately for next iteration
# first assign references
g = scratchV6
ginv = scratchV7
# then copy memory
Copy(g,gcur)
Copy(ginv,ginvcur)
# end if
else:
raise Exception('Unknown integration method: '+integMethod)
#end if
# common lines of code executed regardless of the integration scheme
# end for
return m
#end IntegrateAdjoints
def ParallelTransport(m1, n0, m0, nTimeSteps, diffOp, Ninv=10, integMethod='RK4',saveOutput=False, mtArray=None, ginvArray=None):
'''
    Parallel transport of the vector momentum m0 along the geodesic
    determined by the initial condition n0, from t=0 to t=1, using the
    given time discretization nTimeSteps. Returns the parallel-transported
    momentum in the argument variable m1 at the endpoint of the geodesic
    (t=1). Also returns lists of the norm of m(t), the norm of n(t) and
    the inner product between m and n at all timepoints along the geodesic.
'''
t = [x*1./nTimeSteps for x in range(nTimeSteps+1)]
mGrid = n0.grid()
mType = n0.memType()
v = Field3D(mGrid, mType)
#m = m1
m = Field3D(mGrid, mType)
w = Field3D(mGrid, mType)
n = Field3D(mGrid, mType)
g = Field3D(mGrid, mType)
ginv = Field3D(mGrid, mType)
k_n = Field3D(mGrid, mType)
k_m = Field3D(mGrid, mType)
scratchV1 = Field3D(mGrid, mType)
scratchV2 = Field3D(mGrid, mType)
scratchG = Field3D(mGrid, mType)
scratchM = Field3D(mGrid, mType)
RK4_m = Field3D(mGrid, mType)
RK4_n = Field3D(mGrid, mType)
SetToIdentity(g)
SetToIdentity(ginv)
Copy(n, n0)
Copy(w,n)
diffOp.applyInverseOperator(w)
Copy(m, m0)
Copy(v,m)
diffOp.applyInverseOperator(v)
#common.DebugHere()
mtArray=[]
ginvArray=[]
if saveOutput:
#common.DebugHere()
mtArray.append(m.copy())
ginvArray.append(ginv.copy())
norm2_m=[Dot(m,v)]
norm2_n=[Dot(n,w)]
inner_m_n=[Dot(m,w)]
for i in range(1,len(t),1):
dt = t[i] - t[i-1]
if integMethod == "EULER":
raise Exception('Euler integration for parallel transport not implemented: '+integMethod)
elif integMethod == "RK4":
EvaluateRHSFwd(k_n, w, g); MulC_I(k_n, dt) # k1_n computed
Copy(RK4_n,k_n)
EvaluateRHSParallelTransport(k_m, scratchV1, n, w, m, v, diffOp); MulC_I(k_m, dt) # k1_m computed
Copy(RK4_m,k_m)
# for k2_n
Copy(scratchG, g); MulC_I(k_n,0.5); Add_I(scratchG,k_n); UpdateInverse(scratchV2, scratchV1, ginv, k_n, Ninv); CoAd(n,scratchV2, n0); Copy(w,n); diffOp.applyInverseOperator(w);
EvaluateRHSFwd(k_n, w, scratchG); MulC_I(k_n, dt) # k2 computed
Add_MulC_I(RK4_n, k_n, 2.0)
# for k2_m
Copy(scratchM, m); MulC_I(k_m,0.5); Add_I(scratchM,k_m); Copy(v,scratchM); diffOp.applyInverseOperator(v);
EvaluateRHSParallelTransport(k_m, scratchV1, n, w, scratchM, v, diffOp); MulC_I(k_m, dt) # k2_m computed
Add_MulC_I(RK4_m, k_m, 2.0)
# for k3_n
Copy(scratchG, g); MulC_I(k_n,0.5); Add_I(scratchG,k_n); UpdateInverse(scratchV2, scratchV1, ginv, k_n, Ninv); CoAd(n,scratchV2, n0);Copy(w,n); diffOp.applyInverseOperator(w);
EvaluateRHSFwd(k_n, w, scratchG); MulC_I(k_n, dt) # k3_n computed
Add_MulC_I(RK4_n, k_n, 2.0)
# for k3_m
Copy(scratchM, m); MulC_I(k_m,0.5); Add_I(scratchM,k_m); Copy(v,scratchM); diffOp.applyInverseOperator(v);
EvaluateRHSParallelTransport(k_m, scratchV1, n, w, scratchM, v, diffOp); MulC_I(k_m, dt) # k3_m computed
Add_MulC_I(RK4_m, k_m, 2.0)
# for k4_n
Copy(scratchG, g); Add_I(scratchG,k_n); UpdateInverse(scratchV2, scratchV1, ginv, k_n, Ninv); CoAd(n,scratchV2, n0);Copy(w,n); diffOp.applyInverseOperator(w);
EvaluateRHSFwd(k_n, w, scratchG); MulC_I(k_n, dt) # k4_n computed
Add_I(RK4_n, k_n)
# for k4_m
Copy(scratchM, m); Add_I(scratchM,k_m); Copy(v,scratchM); diffOp.applyInverseOperator(v);
EvaluateRHSParallelTransport(k_m, scratchV1, n, w, scratchM, v, diffOp); MulC_I(k_m, dt) # k4_m computed
Add_I(RK4_m, k_m)
# final update
MulC_I(RK4_n,1.0/float(6.0))
Add_I(g,RK4_n)
UpdateInverse(scratchV2, scratchV1, ginv, RK4_n, Ninv)
Copy(ginv,scratchV2)
Add_MulC_I(m, RK4_m, 1.0/float(6.0))
else:
raise Exception('Unknown integration method: '+integMethod)
# end if
# Coadjoint action gives us momentum at time t
CoAd(n, ginv, n0)
Copy(w,n)
# Smooth to get velocity at time t
diffOp.applyInverseOperator(w)
Copy(v,m)
diffOp.applyInverseOperator(v)
if saveOutput:
mtArray.append(m.copy())
ginvArray.append(ginv.copy())
# save norms and inner product
norm2_m.append(Dot(m,v))
norm2_n.append(Dot(n,w))
inner_m_n.append(Dot(m,w))
# end for
Copy(m1,m)
return norm2_m, norm2_n, inner_m_n, mtArray, ginvArray
def EvaluateRHSParallelTransport(out_m, scratchV, n, w, m, v, diffOp):
'''
Evaluate RHS for parallel transport
'''
AdInf(out_m, w, v)
diffOp.applyOperator(out_m)
CoAdInf(scratchV, w, m)
Sub_I(out_m, scratchV)
CoAdInf(scratchV, v, n)
Sub_I(out_m, scratchV)
MulC_I(out_m, 0.5)
return out_m
#end EvaluateRHSParallelTransport
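# Illustrative usage sketch for ParallelTransport (not part of the original
# module). It assumes the caller has already built the PyCA-style objects used
# throughout this file: vector momenta m0, n0 as Field3D objects and a
# differential operator diffOp exposing applyOperator/applyInverseOperator.
# The parameter values below are illustrative only.
def _example_parallel_transport(m0, n0, diffOp, nTimeSteps=10):
    '''Transport m0 along the geodesic shot by n0; return m1 and diagnostics.'''
    m1 = Field3D(n0.grid(), n0.memType())
    norm2_m, norm2_n, inner_m_n, _, _ = ParallelTransport(
        m1, n0, m0, nTimeSteps, diffOp, Ninv=10, integMethod='RK4')
    # The returned lists let callers monitor how well the discretization
    # preserves the transported norm and the m-n inner product along the
    # geodesic.
    return m1, (norm2_m, norm2_n, inner_m_n)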
def MyGridPlot(vf, sliceIdx=None, dim='z', every=1, isVF=True,
color='g', plotBase=True, colorbase='#A0A0FF'):
sliceArr = common.ExtractSliceArrVF(vf, sliceIdx, dim)
sz = sliceArr.shape
hID = np.mgrid[0:sz[0], 0:sz[1]]
d1 = np.squeeze(hID[1, ::every, ::every])
d2 = np.squeeze(hID[0, ::every, ::every])
sliceArr = sliceArr[::every, ::every, :]
if plotBase:
plt.plot(d1, d2, colorbase)
plt.hold(True)
plt.plot(d1.T, d2.T, colorbase)
if not isVF:
d1 = np.zeros(d1.shape)
d2 = np.zeros(d2.shape)
if dim=='z':
plt.plot(d1+np.squeeze(sliceArr[:,:,1]),
d2+np.squeeze(sliceArr[:,:,0]), color)
plt.hold(True)
plt.plot((d1+np.squeeze(sliceArr[:,:,1])).T,
(d2+np.squeeze(sliceArr[:,:,0])).T, color)
plt.hold(False)
elif dim=='x':
plt.plot(d1+np.squeeze(sliceArr[:,:,2]),
d2+np.squeeze(sliceArr[:,:,1]), color)
plt.hold(True)
plt.plot((d1+np.squeeze(sliceArr[:,:,2])).T,
(d2+np.squeeze(sliceArr[:,:,1])).T, color)
plt.hold(False)
elif dim=='y':
plt.plot(d1+np.squeeze(sliceArr[:,:,2]),
d2+np.squeeze(sliceArr[:,:,0]), color)
plt.hold(True)
plt.plot((d1+np.squeeze(sliceArr[:,:,2])).T,
(d2+np.squeeze(sliceArr[:,:,0])).T, color)
plt.hold(False)
# change axes to match image axes
if not plt.gca().yaxis_inverted():
plt.gca().invert_yaxis()
# force redraw
plt.draw()
def MyQuiver(vf, sliceIdx=None, dim='z',every=1,thresh=None,scaleArrows=None,arrowCol='r',lineWidth=None, width=None):
sliceArr = common.ExtractSliceArrVF(vf, sliceIdx, dim)
if dim=='z':
vy = np.squeeze(sliceArr[:,:,0])
vx = np.squeeze(sliceArr[:,:,1])
elif dim=='x':
vy = np.squeeze(sliceArr[:,:,1])
vx = np.squeeze(sliceArr[:,:,2])
elif dim=='y':
vy = np.squeeze(sliceArr[:,:,0])
vx = np.squeeze(sliceArr[:,:,2])
vxshow = np.zeros(np.shape(vx))
vyshow = np.zeros(np.shape(vy))
vxshow[::every,::every] = vx[::every,::every]
vyshow[::every,::every] = vy[::every,::every]
valindex = np.zeros(np.shape(vx),dtype=bool)
valindex[::every,::every] = True
if thresh is not None:
valindex[(vx**2+vy**2)<thresh] = False
#gridX, gridY = np.meshgrid(range(np.shape(vx)[0]),range(np.shape(vx)[1]-1,-1,-1))
gridX, gridY = np.meshgrid(range(np.shape(vx)[0]),range(np.shape(vx)[1]))
quiverhandle = plt.quiver(gridX[valindex],gridY[valindex],vx[valindex],vy[valindex],scale=scaleArrows,color=arrowCol,linewidth=lineWidth,width=width,zorder=4)
# change axes to match image axes
plt.gca().invert_yaxis()
# force redraw
plt.draw()
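# Illustrative calls for the two plotting helpers above (values are assumed;
# vf is expected to be a Field3D vector field as used elsewhere in this file):
#     MyGridPlot(vf, sliceIdx=32, dim='z', every=4, isVF=True)
#     MyQuiver(vf, sliceIdx=32, dim='z', every=8, thresh=1e-6, arrowCol='r')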
| rkwitt/quicksilver | code/vectormomentum/Code/Python/Libraries/CAvmCommon.py | Python | apache-2.0 | 34,587 |
import shutil
import tempfile
import numpy as np
import os
from os.path import getsize
import pytest
import yaml
from util import PATH_TO_TESTS, seed, dummy_predict_with_threshold
PATH_TO_ASSETS = os.path.join(PATH_TO_TESTS, 'assets')
PATH_TO_RETINA_DIR = os.path.join(PATH_TO_ASSETS, 'recordings', 'retina')
PATH_TO_RETINA_CONFIG_DIR = os.path.join(PATH_TO_RETINA_DIR, 'config')
@pytest.fixture(autouse=True)
def setup():
seed(0)
@pytest.fixture
def patch_triage_network(monkeypatch):
to_patch = 'yass.neuralnetwork.model.KerasModel.predict_with_threshold'
monkeypatch.setattr(to_patch, dummy_predict_with_threshold)
yield
def _path_to_config():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR, 'config.yaml')
def _data_info():
with open(_path_to_config()) as f:
        d = yaml.load(f, Loader=yaml.SafeLoader)
return d
@pytest.fixture()
def data_info():
return _data_info()
@pytest.fixture()
def data():
info = _data_info()['recordings']
path = os.path.join(PATH_TO_RETINA_DIR, 'data.bin')
d = np.fromfile(path, dtype=info['dtype'])
n_observations = int(getsize(path) / info['n_channels'] /
np.dtype(info['dtype']).itemsize)
d = d.reshape(n_observations, info['n_channels'])
return d
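# Illustrative arithmetic for the reshape above (the dtype and channel count
# here are assumptions, not values read from config.yaml): with int16 samples
# (2 bytes each) and 49 channels, a 9,800,000-byte data.bin gives
#     n_observations = 9800000 / 49 / 2 = 100000
# so d is reshaped to (100000, 49).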
@pytest.fixture()
def path_to_tests():
return PATH_TO_TESTS
@pytest.fixture()
def path_to_performance():
return os.path.join(PATH_TO_TESTS, 'performance/')
@pytest.fixture
def make_tmp_folder():
temp = tempfile.mkdtemp()
yield temp
shutil.rmtree(temp)
@pytest.fixture()
def path_to_data():
return os.path.join(PATH_TO_RETINA_DIR, 'data.bin')
@pytest.fixture()
def path_to_geometry():
return os.path.join(PATH_TO_RETINA_DIR, 'geometry.npy')
@pytest.fixture()
def path_to_sample_pipeline_folder():
return os.path.join(PATH_TO_RETINA_DIR,
'sample_pipeline_output')
@pytest.fixture()
def path_to_standardized_data():
return os.path.join(PATH_TO_RETINA_DIR,
'sample_pipeline_output', 'preprocess',
'standardized.bin')
@pytest.fixture()
def path_to_output_reference():
return os.path.join(PATH_TO_ASSETS, 'output_reference')
@pytest.fixture
def path_to_config():
return _path_to_config()
@pytest.fixture
def path_to_config_threshold():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR, 'config_threshold.yaml')
@pytest.fixture
def path_to_config_with_wrong_channels():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR,
'wrong_channels.yaml')
@pytest.fixture
def path_to_txt_geometry():
return os.path.join(PATH_TO_ASSETS, 'test_files', 'geometry.txt')
@pytest.fixture
def path_to_npy_geometry():
return os.path.join(PATH_TO_ASSETS, 'test_files', 'geometry.npy')
| paninski-lab/yass | tests/conftest.py | Python | apache-2.0 | 2,794 |
def index_power(array, n):
    """Return array[n] raised to the power n, or -1 if n is out of range."""
    if n >= len(array):
        return -1
    return array[n] ** n
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert index_power([1, 2, 3, 4], 2) == 9, "Square"
assert index_power([1, 3, 10, 100], 3) == 1000000, "Cube"
assert index_power([0, 1], 0) == 1, "Zero power"
assert index_power([1, 2], 3) == -1, "IndexError" | nesterione/problem-solving-and-algorithms | problems/Empireofcode/IndexPower.py | Python | apache-2.0 | 441 |
# Copyright (c) 2018 Foundries.io
#
# SPDX-License-Identifier: Apache-2.0
import argparse
from unittest.mock import patch, call
import pytest
from runners.nrfjprog import NrfJprogBinaryRunner
from conftest import RC_KERNEL_HEX
#
# Test values
#
TEST_DEF_SNR = 'test-default-serial-number' # for mocking user input
TEST_OVR_SNR = 'test-override-serial-number'
#
# Expected results.
#
# This dictionary maps different configurations to the commands we expect to be
# executed for them. Verification is done by mocking the check_call() method,
# which is used to run the commands.
#
# The key naming scheme is <F><SR><SNR><E>, where:
#
# - F: family, 1 for 'NRF51', 2 for 'NRF52' or 9 for 'NRF91'
# - SR: soft reset, Y for yes, N for pin reset
# - SNR: serial number override, Y for yes, N for 'use default'
# - E: full chip erase, Y for yes, N for sector / sector and UICR only
#
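# For example, key '2YNY' covers: NRF52 family, soft reset ('--reset' instead
# of '--pinresetenable'/'--pinreset'), no serial-number override (the mocked
# default TEST_DEF_SNR is used), and a full '--eraseall' chip erase.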
EXPECTED_COMMANDS = {
# NRF51:
'1NNN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_DEF_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF51', '--snr', TEST_DEF_SNR]),
'1NNY':
(['nrfjprog', '--eraseall', '-f', 'NRF51', '--snr', TEST_DEF_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_DEF_SNR], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF51', '--snr', TEST_DEF_SNR]),
'1NYN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_OVR_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF51', '--snr', TEST_OVR_SNR]),
'1NYY':
(['nrfjprog', '--eraseall', '-f', 'NRF51', '--snr', TEST_OVR_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_OVR_SNR], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF51', '--snr', TEST_OVR_SNR]),
'1YNN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_DEF_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF51', '--snr', TEST_DEF_SNR]),
'1YNY':
(['nrfjprog', '--eraseall', '-f', 'NRF51', '--snr', TEST_DEF_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_DEF_SNR], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF51', '--snr', TEST_DEF_SNR]),
'1YYN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_OVR_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF51', '--snr', TEST_OVR_SNR]),
'1YYY':
(['nrfjprog', '--eraseall', '-f', 'NRF51', '--snr', TEST_OVR_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF51', '--snr', TEST_OVR_SNR], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF51', '--snr', TEST_OVR_SNR]),
# NRF52:
'2NNN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_DEF_SNR, '--sectoranduicrerase'], # noqa: E501
['nrfjprog', '--pinresetenable', '-f', 'NRF52', '--snr', TEST_DEF_SNR],
['nrfjprog', '--pinreset', '-f', 'NRF52', '--snr', TEST_DEF_SNR]),
'2NNY':
(['nrfjprog', '--eraseall', '-f', 'NRF52', '--snr', TEST_DEF_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_DEF_SNR], # noqa: E501
['nrfjprog', '--pinresetenable', '-f', 'NRF52', '--snr', TEST_DEF_SNR],
['nrfjprog', '--pinreset', '-f', 'NRF52', '--snr', TEST_DEF_SNR]),
'2NYN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_OVR_SNR, '--sectoranduicrerase'], # noqa: E501
['nrfjprog', '--pinresetenable', '-f', 'NRF52', '--snr', TEST_OVR_SNR],
['nrfjprog', '--pinreset', '-f', 'NRF52', '--snr', TEST_OVR_SNR]),
'2NYY':
(['nrfjprog', '--eraseall', '-f', 'NRF52', '--snr', TEST_OVR_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_OVR_SNR], # noqa: E501
['nrfjprog', '--pinresetenable', '-f', 'NRF52', '--snr', TEST_OVR_SNR],
['nrfjprog', '--pinreset', '-f', 'NRF52', '--snr', TEST_OVR_SNR]),
'2YNN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_DEF_SNR, '--sectoranduicrerase'], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF52', '--snr', TEST_DEF_SNR]),
'2YNY':
(['nrfjprog', '--eraseall', '-f', 'NRF52', '--snr', TEST_DEF_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_DEF_SNR], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF52', '--snr', TEST_DEF_SNR]),
'2YYN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_OVR_SNR, '--sectoranduicrerase'], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF52', '--snr', TEST_OVR_SNR]),
'2YYY':
(['nrfjprog', '--eraseall', '-f', 'NRF52', '--snr', TEST_OVR_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF52', '--snr', TEST_OVR_SNR], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF52', '--snr', TEST_OVR_SNR]),
# NRF91:
'9NNN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_DEF_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF91', '--snr', TEST_DEF_SNR]),
'9NNY':
(['nrfjprog', '--eraseall', '-f', 'NRF91', '--snr', TEST_DEF_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_DEF_SNR], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF91', '--snr', TEST_DEF_SNR]),
'9NYN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_OVR_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF91', '--snr', TEST_OVR_SNR]),
'9NYY':
(['nrfjprog', '--eraseall', '-f', 'NRF91', '--snr', TEST_OVR_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_OVR_SNR], # noqa: E501
['nrfjprog', '--pinreset', '-f', 'NRF91', '--snr', TEST_OVR_SNR]),
'9YNN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_DEF_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF91', '--snr', TEST_DEF_SNR]),
'9YNY':
(['nrfjprog', '--eraseall', '-f', 'NRF91', '--snr', TEST_DEF_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_DEF_SNR], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF91', '--snr', TEST_DEF_SNR]),
'9YYN':
(['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_OVR_SNR, '--sectorerase'], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF91', '--snr', TEST_OVR_SNR]),
'9YYY':
(['nrfjprog', '--eraseall', '-f', 'NRF91', '--snr', TEST_OVR_SNR],
['nrfjprog', '--program', RC_KERNEL_HEX, '-f', 'NRF91', '--snr', TEST_OVR_SNR], # noqa: E501
['nrfjprog', '--reset', '-f', 'NRF91', '--snr', TEST_OVR_SNR]),
}
def expected_commands(family, softreset, snr, erase):
    '''Expected NrfJprogBinaryRunner commands for the given parameters.
    Arguments:
    - family: string, 'NRF51', 'NRF52' or 'NRF91'
    - softreset: boolean, controls whether soft reset is performed
    - snr: string serial number override for the board, or None to use the
      (mocked) default serial number
    - erase: boolean, whether to do a full chip erase or not
    '''
expected_key = '{}{}{}{}'.format(
'1' if family == 'NRF51' else '2' if family == 'NRF52' else '9',
'Y' if softreset else 'N',
'Y' if snr else 'N',
'Y' if erase else 'N')
return EXPECTED_COMMANDS[expected_key]
#
# Test cases
#
TEST_CASES = [(f, sr, snr, e)
for f in ('NRF51', 'NRF52', 'NRF91')
for sr in (False, True)
for snr in (TEST_OVR_SNR, None)
for e in (False, True)]
def get_board_snr_patch():
return TEST_DEF_SNR
def require_patch(program):
assert program == 'nrfjprog'
def id_fn(test_case):
    ret = ''
    for x in test_case:
        if x in ('NRF51', 'NRF52', 'NRF91'):
            # Mirror the EXPECTED_COMMANDS key scheme: '1', '2' or '9'.
            ret += '1' if x == 'NRF51' else '2' if x == 'NRF52' else '9'
        else:
            ret += 'Y' if x else 'N'
    return ret
@pytest.mark.parametrize('test_case', TEST_CASES, ids=id_fn)
@patch('runners.core.ZephyrBinaryRunner.require', side_effect=require_patch)
@patch('runners.nrfjprog.NrfJprogBinaryRunner.get_board_snr_from_user',
side_effect=get_board_snr_patch)
@patch('runners.nrfjprog.NrfJprogBinaryRunner.check_call')
def test_nrfjprog_init(cc, get_snr, req, test_case, runner_config):
family, softreset, snr, erase = test_case
runner = NrfJprogBinaryRunner(runner_config, family, softreset, snr,
erase=erase)
runner.run('flash')
assert req.called
assert cc.call_args_list == [call(x) for x in
expected_commands(*test_case)]
if snr is None:
get_snr.assert_called_once_with()
else:
get_snr.assert_not_called()
@pytest.mark.parametrize('test_case', TEST_CASES, ids=id_fn)
@patch('runners.core.ZephyrBinaryRunner.require', side_effect=require_patch)
@patch('runners.nrfjprog.NrfJprogBinaryRunner.get_board_snr_from_user',
side_effect=get_board_snr_patch)
@patch('runners.nrfjprog.NrfJprogBinaryRunner.check_call')
def test_nrfjprog_create(cc, get_snr, req, test_case, runner_config):
family, softreset, snr, erase = test_case
args = ['--nrf-family', family]
if softreset:
args.append('--softreset')
if snr is not None:
args.extend(['--snr', snr])
if erase:
args.append('--erase')
parser = argparse.ArgumentParser()
NrfJprogBinaryRunner.add_parser(parser)
arg_namespace = parser.parse_args(args)
runner = NrfJprogBinaryRunner.create(runner_config, arg_namespace)
runner.run('flash')
assert req.called
assert cc.call_args_list == [call(x) for x in
expected_commands(*test_case)]
if snr is None:
get_snr.assert_called_once_with()
else:
get_snr.assert_not_called()
| GiulianoFranchetto/zephyr | scripts/west_commands/tests/test_nrfjprog.py | Python | apache-2.0 | 9,816 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for managing distrubtion strategies."""
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
flags.DEFINE_string('tpu', None, 'BNS address for the TPU')
flags.DEFINE_bool('use_gpu', False, 'If True a MirroredStrategy will be used.')
def get_strategy(tpu, use_gpu):
"""Utility to create a `tf.DistributionStrategy` for TPU or GPU.
  If neither is requested, the default strategy is returned, which allows
  executing on CPU only.
Args:
    tpu: BNS address of the TPU to use. Note the flag and param are called
      `tpu` because that is what the xmanager utilities call it.
use_gpu: Whether a GPU should be used. This will create a MirroredStrategy.
Raises:
ValueError if both tpu and use_gpu are set.
Returns:
An instance of a `tf.DistributionStrategy`.
"""
if tpu and use_gpu:
raise ValueError('Only one of tpu or use_gpu should be provided.')
if tpu or use_gpu:
logging.info('Devices: \n%s', tf.config.list_logical_devices())
if tpu:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
logging.info('Devices after getting strategy:\n%s',
tf.config.list_logical_devices())
else:
strategy = tf.distribute.get_strategy()
return strategy
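# Illustrative usage sketch (not part of the original module): a main() meant
# to be passed to absl's app.run so the flags declared above are parsed before
# FLAGS is read. The model-building step is a placeholder assumption.
def _example_main(argv):
  del argv  # Unused.
  strategy = get_strategy(tpu=flags.FLAGS.tpu, use_gpu=flags.FLAGS.use_gpu)
  with strategy.scope():
    # Create networks/agents here so their variables are placed under the
    # chosen strategy (TPU, mirrored GPUs, or the default CPU strategy).
    pass
  return strategy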
| tensorflow/agents | tf_agents/train/utils/strategy_utils.py | Python | apache-2.0 | 2,113 |
#!/usr/bin/env python
# Lint as: python3
"""Tests for PrometheusStatsCollector."""
from absl import app
from grr_response_core.stats import stats_test_utils
from grr_response_server import prometheus_stats_collector
from grr.test_lib import test_lib
class PrometheusStatsCollectorTest(stats_test_utils.StatsCollectorTest):
def _CreateStatsCollector(self):
return prometheus_stats_collector.PrometheusStatsCollector()
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| google/grr | grr/server/grr_response_server/prometheus_stats_collector_test.py | Python | apache-2.0 | 514 |