repo_name | path | copies | size | content | license
---|---|---|---|---|---|
uclouvain/osis
|
learning_unit/tests/calendar/test_learning_unit_enrollment_calendar.py
|
1
|
3166
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from django.forms import model_to_dict
from django.test import TestCase
from base.models.academic_calendar import AcademicCalendar
from base.models.enums.academic_calendar_type import AcademicCalendarTypes
from base.tests.factories.academic_year import create_current_academic_year, AcademicYearFactory
from learning_unit.calendar.learning_unit_enrollment_calendar import LearningUnitEnrollmentCalendar
class TestLearningUnitEnrollmentCalendarEnsureConsistencyUntilNPlus6(TestCase):
@classmethod
def setUpTestData(cls):
cls.current_academic_year = create_current_academic_year()
AcademicYearFactory.produce_in_future(cls.current_academic_year.year)
def test_ensure_consistency_until_n_plus_6_assert_default_value(self):
LearningUnitEnrollmentCalendar.ensure_consistency_until_n_plus_6()
qs = AcademicCalendar.objects.filter(reference=AcademicCalendarTypes.COURSE_ENROLLMENT.name)
self.assertEqual(qs.count(), 7)
self.assertDictEqual(
model_to_dict(qs.first(), fields=('title', 'reference', 'data_year', 'start_date', 'end_date')),
{
"title": "Inscription aux cours",
"reference": AcademicCalendarTypes.COURSE_ENROLLMENT.name,
"data_year": self.current_academic_year.pk,
"start_date": datetime.date(self.current_academic_year.year, 9, 1),
"end_date": datetime.date(self.current_academic_year.year, 10, 31),
}
)
def test_ensure_consistency_until_n_plus_6_assert_idempotent(self):
for _ in range(5):
LearningUnitEnrollmentCalendar.ensure_consistency_until_n_plus_6()
self.assertEqual(
AcademicCalendar.objects.filter(
reference=AcademicCalendarTypes.COURSE_ENROLLMENT.name
).count(),
7
)
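# The expected count of 7 corresponds to one COURSE_ENROLLMENT calendar per
# academic year from N through N+6 (hence "ensure_consistency_until_n_plus_6").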
|
agpl-3.0
|
mcrowson/django
|
tests/proxy_models/tests.py
|
260
|
16941
|
from __future__ import unicode_literals
import datetime
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions, management
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug, ProxyImprovement,
ProxyProxyBug, ProxyTrackerUser, State, StateProxy, StatusPerson,
TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
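# Proxy models are declared with ``proxy = True`` in Meta and reuse the
# concrete model's table; a hypothetical minimal proxy of Person (the real
# MyPerson in .models also attaches custom managers) would look like:
#
#   class MyPerson(Person):
#       class Meta:
#           proxy = True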
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
def test_inheritance_new_table(self):
"""
The StatusPerson model should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by the StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of a proxy.
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.DoesNotExist,
MyPersonProxy.objects.get,
name='Zathras'
)
self.assertRaises(
Person.MultipleObjectsReturned,
MyPersonProxy.objects.get,
id__lt=max_id + 1
)
self.assertRaises(
Person.DoesNotExist,
StatusPerson.objects.get,
name='Zathras'
)
StatusPerson.objects.create(name='Bazza Jr.')
StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.MultipleObjectsReturned,
StatusPerson.objects.get,
id__lt=max_id + 1
)
def test_abc(self):
"""
All base classes must be non-abstract
"""
def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
def test_no_cbc(self):
"""
The proxy must actually have one concrete base class
"""
def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
def test_no_base_classes(self):
def build_no_base_classes():
class NoBaseClasses(models.Model):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_base_classes)
def test_new_fields(self):
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
# don't register this model in the app_cache for the current app,
# otherwise the check fails when other tests are being run.
app_label = 'no_such_app'
errors = NoNewFields.check()
expected = [
checks.Error(
"Proxy model 'NoNewFields' contains model fields.",
hint=None,
obj=None,
id='models.E017',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
def test_swappable(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['proxy_models'].models.copy()
try:
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class AlternateModel(models.Model):
pass
# You can't proxy a swapped model
with self.assertRaises(TypeError):
class ProxyModel(SwappableModel):
class Meta:
proxy = True
finally:
apps.app_configs['proxy_models'].models = _old_models
apps.all_models['proxy_models'] = _old_models
apps.clear_cache()
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
try:
Permission.objects.get(name="May display users information")
except Permission.DoesNotExist:
self.fail("The permission 'May display users information' has not been created")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
MyPerson.objects.create(name="dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
MyPersonProxy.objects.create(name="pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertIs(ctype(Person), ctype(OtherPerson))
def test_user_userproxy_userproxyproxy(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name='Australia')
State.objects.create(name='New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
'New South Wales')
resp = StateProxy.objects.select_related().get(name='New South Wales')
self.assertEqual(resp.name, 'New South Wales')
def test_filter_proxy_relation_reverse(self):
tu = TrackerUser.objects.create(
name='Contributor', status='contrib')
with self.assertRaises(exceptions.FieldError):
TrackerUser.objects.filter(issue=None),
self.assertQuerysetEqual(
ProxyTrackerUser.objects.filter(issue=None),
[tu], lambda x: x
)
def test_proxy_bug(self):
contributor = ProxyTrackerUser.objects.create(name='Contributor',
status='contrib')
someone = BaseUser.objects.create(name='Someone')
Bug.objects.create(summary='fix this', version='1.1beta',
assignee=contributor, reporter=someone)
pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
status='proxy')
Improvement.objects.create(summary='improve that', version='1.1beta',
assignee=contributor, reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0])
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(
version__icontains='beta'
)
self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains='butor'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains='fix'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
def test_proxy_load_from_fixture(self):
management.call_command('loaddata', 'mypeople.json', verbosity=0)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, 'Elvis Presley')
def test_eq(self):
self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='proxy_models.urls',)
class ProxyModelAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = AuthUser.objects.create(
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='[email protected]', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
def test_cascade_delete_proxy_model_admin_warning(self):
"""
Test that the admin warns about cascade-deleting models that reference
the concrete model when a proxy object is deleted.
"""
tracker_user = TrackerUser.objects.all()[0]
base_user = BaseUser.objects.all()[0]
issue = Issue.objects.all()[0]
with self.assertNumQueries(7):
collector = admin.utils.NestedObjects('default')
collector.collect(ProxyTrackerUser.objects.all())
self.assertIn(tracker_user, collector.edges.get(None, ()))
self.assertIn(base_user, collector.edges.get(None, ()))
self.assertIn(issue, collector.edges.get(tracker_user, ()))
def test_delete_str_in_model_admin(self):
"""
Test that the admin delete page shows the correct string representation
for a proxy model.
"""
user = TrackerUser.objects.get(name='Django Pony')
proxy = ProxyTrackerUser.objects.get(name='Django Pony')
user_str = 'Tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
)
proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
)
self.client.login(username='super', password='secret')
response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, user_str)
response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, proxy_str)
self.client.logout()
|
bsd-3-clause
|
kitsunde/ansible
|
lib/ansible/plugins/action/include_vars.py
|
13
|
1881
|
# (c) 2013-2014, Benno Joy <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from types import NoneType
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
source = self._task.args.get('_raw_params')
if self._task._role:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)
else:
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'vars', source)
if os.path.exists(source):
(data, show_content) = self._loader._get_file_contents(source)
data = self._loader.load(data, show_content)
if data is None:
data = {}
if not isinstance(data, dict):
raise AnsibleError("%s must be stored as a dictionary/hash" % source)
return dict(ansible_facts=data, _ansible_no_log=not show_content)
else:
return dict(failed=True, msg="Source file not found.", file=source)
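# A minimal usage sketch (task wording and file name are illustrative, not part
# of this module): a playbook task such as
#
#   - name: load OS family specific variables
#     include_vars: "vars/{{ ansible_os_family }}.yml"
#
# is routed to this action plugin, which resolves the path relative to the
# role's (or playbook's) vars directory and returns the parsed dictionary to
# the host via ansible_facts.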
|
gpl-3.0
|
absoludity/servo
|
tests/wpt/update/fetchlogs.py
|
222
|
3183
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import cStringIO
import gzip
import json
import os
import requests
import urlparse
treeherder_base = "https://treeherder.mozilla.org/"
"""Simple script for downloading structured logs from treeherder.
For the moment this is specialised to work with web-platform-tests
logs; in due course it should move somewhere generic and get hooked
up to mach or similar"""
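# Example invocation (branch and commit values are illustrative):
#
#   python fetchlogs.py mozilla-inbound 1234abcd5678
#
# For each "W3C Web Platform" job that exposes a wpt_raw.log artifact, the raw
# structured log is written to the current directory as "<platform>-<n>.log".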
# Interpretation of the "job" list from
# https://github.com/mozilla/treeherder-service/blob/master/treeherder/webapp/api/utils.py#L18
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("branch", action="store",
help="Branch on which jobs ran")
parser.add_argument("commit",
action="store",
help="Commit hash for push")
return parser
def download(url, prefix, dest, force_suffix=True):
if dest is None:
dest = "."
if prefix and not force_suffix:
name = os.path.join(dest, prefix + ".log")
else:
name = None
counter = 0
while not name or os.path.exists(name):
counter += 1
sep = "" if not prefix else "-"
name = os.path.join(dest, prefix + sep + str(counter) + ".log")
with open(name, "wb") as f:
resp = requests.get(url, stream=True)
for chunk in resp.iter_content(1024):
f.write(chunk)
def get_blobber_url(branch, job):
job_id = job["id"]
resp = requests.get(urlparse.urljoin(treeherder_base,
"/api/project/%s/artifact/?job_id=%i&name=Job%%20Info" % (branch,
job_id)))
job_data = resp.json()
if job_data:
assert len(job_data) == 1
job_data = job_data[0]
try:
details = job_data["blob"]["job_details"]
for item in details:
if item["value"] == "wpt_raw.log":
return item["url"]
except:
return None
def get_structured_logs(branch, commit, dest=None):
resp = requests.get(urlparse.urljoin(treeherder_base, "/api/project/%s/resultset/?revision=%s" % (branch, commit)))
revision_data = resp.json()
result_set = revision_data["results"][0]["id"]
resp = requests.get(urlparse.urljoin(treeherder_base, "/api/project/%s/jobs/?result_set_id=%s&count=2000&exclusion_profile=false" % (branch, result_set)))
job_data = resp.json()
for result in job_data["results"]:
job_type_name = result["job_type_name"]
if job_type_name.startswith("W3C Web Platform"):
url = get_blobber_url(branch, result)
if url:
prefix = result["platform"] # platform
download(url, prefix, None)
def main():
parser = create_parser()
args = parser.parse_args()
get_structured_logs(args.branch, args.commit)
if __name__ == "__main__":
main()
|
mpl-2.0
|
marctc/django
|
tests/admin_filters/models.py
|
108
|
2105
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
year = models.PositiveIntegerField(null=True, blank=True)
author = models.ForeignKey(
User,
models.SET_NULL,
verbose_name="Verbose Author",
related_name='books_authored',
blank=True, null=True,
)
contributors = models.ManyToManyField(User, verbose_name="Verbose Contributors", related_name='books_contributed', blank=True)
is_best_seller = models.NullBooleanField(default=0)
date_registered = models.DateField(null=True)
no = models.IntegerField(verbose_name='number', blank=True, null=True) # This field is intentionally 2 characters long. See #16080.
def __str__(self):
return self.title
@python_2_unicode_compatible
class Department(models.Model):
code = models.CharField(max_length=4, unique=True)
description = models.CharField(max_length=50, blank=True, null=True)
def __str__(self):
return self.description
@python_2_unicode_compatible
class Employee(models.Model):
department = models.ForeignKey(Department, models.CASCADE, to_field="code")
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class TaggedItem(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='tagged_items')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
url = models.URLField()
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.url
|
bsd-3-clause
|
cluo512/storm
|
storm-multilang/python/src/main/resources/resources/storm.py
|
10
|
6826
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import traceback
from collections import deque
try:
import simplejson as json
except ImportError:
import json
json_encode = lambda x: json.dumps(x)
json_decode = lambda x: json.loads(x)
# Reads lines from stdin until the "end" sentinel, preserving embedded newlines, and JSON-decodes the result.
def readMsg():
msg = ""
while True:
line = sys.stdin.readline()
if not line:
raise Exception('Read EOF from stdin')
if line[0:-1] == "end":
break
msg = msg + line
return json_decode(msg[0:-1])
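# Sketch of the line-oriented framing readMsg() expects on stdin; the JSON
# payload shown is an illustrative handshake message, terminated by the
# literal line "end":
#
#   {"pidDir": "/tmp/pids", "conf": {}, "context": {}}
#   end
#
# Everything before the "end" line is accumulated and JSON-decoded.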
MODE = None
ANCHOR_TUPLE = None
#queue up commands we read while trying to read taskids
pending_commands = deque()
def readTaskIds():
if pending_taskids:
return pending_taskids.popleft()
else:
msg = readMsg()
while type(msg) is not list:
pending_commands.append(msg)
msg = readMsg()
return msg
#queue up taskids we read while trying to read commands/tuples
pending_taskids = deque()
def readCommand():
if pending_commands:
return pending_commands.popleft()
else:
msg = readMsg()
while type(msg) is list:
pending_taskids.append(msg)
msg = readMsg()
return msg
def readTuple():
cmd = readCommand()
return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"], cmd["tuple"])
def sendMsgToParent(msg):
print(json_encode(msg))
print("end")
sys.stdout.flush()
def sync():
sendMsgToParent({'command':'sync'})
def sendpid(heartbeatdir):
pid = os.getpid()
sendMsgToParent({'pid':pid})
open(heartbeatdir + "/" + str(pid), "w").close()
def emit(*args, **kwargs):
__emit(*args, **kwargs)
return readTaskIds()
def emitDirect(task, *args, **kwargs):
kwargs["directTask"] = task
__emit(*args, **kwargs)
def __emit(*args, **kwargs):
global MODE
if MODE == Bolt:
emitBolt(*args, **kwargs)
elif MODE == Spout:
emitSpout(*args, **kwargs)
def emitBolt(tup, stream=None, anchors = [], directTask=None):
global ANCHOR_TUPLE
if ANCHOR_TUPLE is not None:
anchors = [ANCHOR_TUPLE]
m = {"command": "emit"}
if stream is not None:
m["stream"] = stream
m["anchors"] = [a.id for a in anchors]
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def emitSpout(tup, stream=None, id=None, directTask=None):
m = {"command": "emit"}
if id is not None:
m["id"] = id
if stream is not None:
m["stream"] = stream
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def ack(tup):
sendMsgToParent({"command": "ack", "id": tup.id})
def fail(tup):
sendMsgToParent({"command": "fail", "id": tup.id})
def reportError(msg):
sendMsgToParent({"command": "error", "msg": msg})
def log(msg, level=2):
sendMsgToParent({"command": "log", "msg": msg, "level":level})
def logTrace(msg):
log(msg, 0)
def logDebug(msg):
log(msg, 1)
def logInfo(msg):
log(msg, 2)
def logWarn(msg):
log(msg, 3)
def logError(msg):
log(msg, 4)
def rpcMetrics(name, params):
sendMsgToParent({"command": "metrics", "name": name, "params": params})
def initComponent():
setupInfo = readMsg()
sendpid(setupInfo['pidDir'])
return [setupInfo['conf'], setupInfo['context']]
class Tuple(object):
def __init__(self, id, component, stream, task, values):
self.id = id
self.component = component
self.stream = stream
self.task = task
self.values = values
def __repr__(self):
return '<%s%s>' % (
self.__class__.__name__,
''.join(' %s=%r' % (k, self.__dict__[k]) for k in sorted(self.__dict__.keys())))
def is_heartbeat_tuple(self):
return self.task == -1 and self.stream == "__heartbeat"
class Bolt(object):
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global MODE
MODE = Bolt
conf, context = initComponent()
try:
self.initialize(conf, context)
while True:
tup = readTuple()
if tup.is_heartbeat_tuple():
sync()
else:
self.process(tup)
except Exception as e:
reportError(traceback.format_exc())
class BasicBolt(object):
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global MODE
MODE = Bolt
global ANCHOR_TUPLE
conf, context = initComponent()
try:
self.initialize(conf, context)
while True:
tup = readTuple()
if tup.is_heartbeat_tuple():
sync()
else:
ANCHOR_TUPLE = tup
try:
self.process(tup)
ack(tup)
except Exception as e:
reportError(traceback.format_exc())
fail(tup)
except Exception as e:
reportError(traceback.format_exc())
class Spout(object):
def initialize(self, conf, context):
pass
def ack(self, id):
pass
def fail(self, id):
pass
def nextTuple(self):
pass
def run(self):
global MODE
MODE = Spout
conf, context = initComponent()
try:
self.initialize(conf, context)
while True:
msg = readCommand()
if msg["command"] == "next":
self.nextTuple()
if msg["command"] == "ack":
self.ack(msg["id"])
if msg["command"] == "fail":
self.fail(msg["id"])
sync()
except Exception as e:
reportError(traceback.format_exc())
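# A minimal sketch of how a component script would use these classes (the
# field layout and bolt name are assumptions, not part of this module):
#
#   class SplitSentenceBolt(BasicBolt):
#       def process(self, tup):
#           for word in tup.values[0].split(" "):
#               emit([word])
#
#   SplitSentenceBolt().run()
#
# run() performs the pid handshake via initComponent(), then loops reading
# tuples, calling process() and ack()/fail() around each one.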
|
apache-2.0
|
duhzecca/cinder
|
cinder/tests/unit/test_netapp.py
|
9
|
58929
|
# Copyright (c) 2012 NetApp, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for NetApp volume driver."""
from lxml import etree
import mock
import six
from six.moves import BaseHTTPServer
from six.moves import http_client
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fake_api as netapp_api)
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils
FAKE_CONNECTION_HTTP = {
'transport_type': 'http',
'username': 'admin',
'password': 'pass',
'hostname': '127.0.0.1',
'port': None,
'vserver': 'openstack',
}
def create_configuration():
configuration = conf.Configuration(None)
configuration.append_config_values(options.netapp_connection_opts)
configuration.append_config_values(options.netapp_transport_opts)
configuration.append_config_values(options.netapp_basicauth_opts)
configuration.append_config_values(options.netapp_cluster_opts)
configuration.append_config_values(options.netapp_7mode_opts)
configuration.append_config_values(options.netapp_provisioning_opts)
return configuration
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""HTTP handler that doesn't spam the log."""
def log_message(self, format, *args):
pass
class FakeHttplibSocket(object):
"""A fake socket implementation for http_client.HTTPResponse."""
def __init__(self, value):
self._rbuffer = six.StringIO(value)
self._wbuffer = six.StringIO('')
oldclose = self._wbuffer.close
def newclose():
self.result = self._wbuffer.getvalue()
oldclose()
self._wbuffer.close = newclose
def makefile(self, mode, _other):
"""Returns the socket's internal buffer"""
if mode == 'r' or mode == 'rb':
return self._rbuffer
if mode == 'w' or mode == 'wb':
return self._wbuffer
RESPONSE_PREFIX_DIRECT_CMODE = """<?xml version='1.0' encoding='UTF-8' ?>
<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>"""
RESPONSE_PREFIX_DIRECT_7MODE = """<?xml version='1.0' encoding='UTF-8' ?>
<!DOCTYPE netapp SYSTEM "/na_admin/netapp_filer.dtd">"""
RESPONSE_PREFIX_DIRECT = """
<netapp version='1.15' xmlns='http://www.netapp.com/filer/admin'>"""
RESPONSE_SUFFIX_DIRECT = """</netapp>"""
class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler):
"""HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(s):
"""Respond to a GET request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
out = s.wfile
out.write('<netapp version="1.15">'
'<results reason="Not supported method type"'
' status="failed" errno="Not_Allowed"/></netapp>')
def do_POST(s): # noqa
"""Respond to a POST request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
s.end_headers()
return
request_xml = s.rfile.read(int(s.headers['Content-Length']))
root = etree.fromstring(request_xml)
body = [x for x in root.iterchildren()]
request = body[0]
tag = request.tag
api = etree.QName(tag).localname or tag
if 'lun-get-iter' == api:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<lun-info>
<alignment>indeterminate</alignment>
<block-size>512</block-size>
<comment></comment><creation-timestamp>1354536362
</creation-timestamp>
<is-space-alloc-enabled>false</is-space-alloc-enabled>
<is-space-reservation-enabled>true
</is-space-reservation-enabled>
<mapped>false</mapped><multiprotocol-type>linux
</multiprotocol-type>
<online>true</online><path>/vol/navneet/lun1</path>
<prefix-size>0</prefix-size><qtree></qtree><read-only>
false</read-only><serial-number>2FfGI$APyN68</serial-number>
<share-state>none</share-state><size>20971520</size>
<size-used>0</size-used><staging>false</staging>
<suffix-size>0</suffix-size>
<uuid>cec1f3d7-3d41-11e2-9cf4-123478563412</uuid>
<volume>navneet</volume><vserver>ben_vserver</vserver>
</lun-info></attributes-list>
<next-tag><lun-get-iter-key-td>
<key-0>ben_vserver</key-0>
<key-1>/vol/navneet/lun2</key-1>
<key-2>navneet</key-2>
<key-3></key-3>
<key-4>lun2</key-4>
</lun-get-iter-key-td>
</next-tag><num-records>1</num-records></results>"""
else:
body = """<results status="passed"><attributes-list>
<lun-info>
<alignment>indeterminate</alignment>
<block-size>512</block-size>
<comment></comment><creation-timestamp>1354536362
</creation-timestamp>
<is-space-alloc-enabled>false</is-space-alloc-enabled>
<is-space-reservation-enabled>true
</is-space-reservation-enabled>
<mapped>false</mapped><multiprotocol-type>linux
</multiprotocol-type>
<online>true</online><path>/vol/navneet/lun3</path>
<prefix-size>0</prefix-size><qtree></qtree><read-only>
false</read-only><serial-number>2FfGI$APyN68
</serial-number>
<share-state>none</share-state><size>20971520</size>
<size-used>0</size-used><staging>false</staging>
<suffix-size>0</suffix-size>
<uuid>cec1f3d7-3d41-11e2-9cf4-123478563412</uuid>
<volume>navneet</volume><vserver>ben_vserver</vserver>
</lun-info></attributes-list>
<num-records>1</num-records></results>"""
elif 'volume-get-iter' == api:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<volume-attributes>
<volume-id-attributes><name>iscsi</name>
<owning-vserver-name>Openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>214748364</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
<volume-attributes>
<volume-id-attributes><name>nfsvol</name>
<owning-vserver-name>openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>247483648</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
</attributes-list>
<next-tag><volume-get-iter-key-td>
<key-0>openstack</key-0>
<key-1>nfsvol</key-1>
</volume-get-iter-key-td>
</next-tag><num-records>2</num-records></results>"""
else:
body = """<results status="passed"><attributes-list>
<volume-attributes>
<volume-id-attributes><name>iscsi</name>
<owning-vserver-name>Openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>4147483648</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
<volume-attributes>
<volume-id-attributes><name>nfsvol</name>
<owning-vserver-name>openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>8147483648</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
</attributes-list>
<num-records>2</num-records></results>"""
elif 'lun-create-by-size' == api:
body = """<results status="passed">
<actual-size>22020096</actual-size></results>"""
elif 'lun-destroy' == api:
body = """<results status="passed"/>"""
elif 'igroup-get-iter' == api:
init_found = True
query = FakeDirectCMODEServerHandler._get_child_by_name(request,
'query')
if query is not None:
igroup_info = FakeDirectCMODEServerHandler._get_child_by_name(
query, 'initiator-group-info')
if igroup_info is not None:
inits = FakeDirectCMODEServerHandler._get_child_by_name(
igroup_info, 'initiators')
if inits is not None:
init_info = \
FakeDirectCMODEServerHandler._get_child_by_name(
inits, 'initiator-info')
init_name = \
FakeDirectCMODEServerHandler._get_child_content(
init_info,
'initiator-name')
if init_name == 'iqn.1993-08.org.debian:01:10':
init_found = True
else:
init_found = False
if init_found:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(
request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<initiator-group-info><initiator-group-name>
openstack-01f5297b-00f7-4170-bf30-69b1314b2118
</initiator-group-name>
<initiator-group-os-type>windows</initiator-group-os-type>
<initiator-group-type>iscsi</initiator-group-type>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10</initiator-name>
</initiator-info></initiators>
<vserver>openstack</vserver></initiator-group-info>
</attributes-list><next-tag>
<igroup-get-iter-key-td>
<key-0>openstack</key-0>
<key-1>
openstack-01f5297b-00f7-4170-bf30-69b1314b2118<
/key-1>
</igroup-get-iter-key-td>
</next-tag><num-records>1</num-records></results>"""
else:
body = """<results status="passed"><attributes-list>
<initiator-group-info><initiator-group-name>
openstack-01f5297b-00f7-4170-bf30-69b1314b2118
</initiator-group-name>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-type>iscsi</initiator-group-type>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10</initiator-name>
</initiator-info></initiators>
<vserver>openstack</vserver></initiator-group-info>
</attributes-list><num-records>1</num-records></results>"""
else:
body = """<results status="passed">
<num-records>0</num-records>
</results>"""
elif 'lun-map-get-iter' == api:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<lun-map-info>
<initiator-group>openstack-44c5e7e1-3306-4800-9623-259e57d56a83
</initiator-group>
<initiator-group-uuid>948ae304-06e9-11e2</initiator-group-uuid>
<lun-id>0</lun-id>
<lun-uuid>5587e563-06e9-11e2-9cf4-123478563412</lun-uuid>
<path>/vol/openvol/lun1</path>
<vserver>openstack</vserver>
</lun-map-info></attributes-list>
<next-tag>
<lun-map-get-iter-key-td>
<key-0>openstack</key-0>
<key-1>openstack-01f5297b-00f7-4170-bf30-69b1314b2118<
/key-1>
</lun-map-get-iter-key-td>
</next-tag>
<num-records>1</num-records>
</results>"""
else:
body = """<results status="passed"><attributes-list>
<lun-map-info>
<initiator-group>openstack-44c5e7e1-3306-4800-9623-259e57d56a83
</initiator-group>
<initiator-group-uuid>948ae304-06e9-11e2</initiator-group-uuid>
<lun-id>0</lun-id>
<lun-uuid>5587e563-06e9-11e2-9cf4-123478563412</lun-uuid>
<path>/vol/openvol/lun1</path>
<vserver>openstack</vserver>
</lun-map-info></attributes-list><num-records>1</num-records>
</results>"""
elif 'lun-map' == api:
body = """<results status="passed"><lun-id-assigned>1
</lun-id-assigned>
</results>"""
elif 'lun-get-geometry' == api:
body = """<results status="passed"><bytes-per-sector>256
</bytes-per-sector><cylinders>512</cylinders><max-resize-size>
3221225472</max-resize-size><sectors-per-track>512
</sectors-per-track><size>2147483648</size>
<tracks-per-cylinder>256</tracks-per-cylinder></results>"""
elif 'iscsi-service-get-iter' == api:
body = """<results status="passed"><attributes-list>
<iscsi-service-info>
<alias-name>openstack</alias-name>
<is-available>true</is-available>
<node-name>iqn.1992-08.com.netapp:sn.fa9:vs.105</node-name>
<vserver>openstack</vserver></iscsi-service-info>
</attributes-list><num-records>1</num-records></results>"""
elif 'iscsi-interface-get-iter' == api:
body = """<results status="passed"><attributes-list>
<iscsi-interface-list-entry-info><current-node>
fas3170rre-cmode-01
</current-node><current-port>e1b-1165</current-port>
<interface-name>
iscsi_data_if</interface-name>
<ip-address>10.63.165.216</ip-address>
<ip-port>3260</ip-port><is-interface-enabled>true
</is-interface-enabled>
<relative-port-id>5</relative-port-id>
<tpgroup-name>iscsi_data_if</tpgroup-name>
<tpgroup-tag>1038</tpgroup-tag><vserver>
openstack</vserver>
</iscsi-interface-list-entry-info></attributes-list>
<num-records>1</num-records></results>"""
elif 'igroup-create' == api:
body = """<results status="passed"/>"""
elif 'igroup-add' == api:
body = """<results status="passed"/>"""
elif 'clone-create' == api:
body = """<results status="passed"/>"""
elif 'lun-unmap' == api:
body = """<results status="passed"/>"""
elif 'system-get-ontapi-version' == api:
body = """<results status="passed">
<major-version>1</major-version>
<minor-version>19</minor-version>
</results>"""
elif 'vserver-get-iter' == api:
body = """<results status="passed"><attributes-list>
<vserver-info>
<vserver-name>vserver</vserver-name>
<vserver-type>node</vserver-type>
</vserver-info>
</attributes-list>
<num-records>1</num-records></results>"""
elif 'ems-autosupport-log' == api:
body = """<results status="passed"/>"""
elif 'lun-resize' == api:
body = """<results status="passed"/>"""
elif 'lun-get-geometry' == api:
body = """<results status="passed">
<size>1</size>
<bytes-per-sector>2</bytes-per-sector>
<sectors-per-track>8</sectors-per-track>
<tracks-per-cylinder>2</tracks-per-cylinder>
<cylinders>4</cylinders>
<max-resize-size>5</max-resize-size>
</results>"""
elif 'volume-options-list-info' == api:
body = """<results status="passed">
<options>
<option>
<name>compression</name>
<value>off</value>
</option>
</options>
</results>"""
elif 'lun-move' == api:
body = """<results status="passed"/>"""
else:
# Unknown API
s.send_response(500)
s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE)
s.wfile.write(RESPONSE_PREFIX_DIRECT)
s.wfile.write(body)
s.wfile.write(RESPONSE_SUFFIX_DIRECT)
@staticmethod
def _get_child_by_name(self, name):
for child in self.iterchildren():
if child.tag == name or etree.QName(child.tag).localname == name:
return child
return None
@staticmethod
def _get_child_content(self, name):
"""Get the content of the child."""
for child in self.iterchildren():
if child.tag == name or etree.QName(child.tag).localname == name:
return child.text
return None
class FakeDirectCmodeHTTPConnection(object):
"""A fake http_client.HTTPConnection for netapp tests
Requests made via this connection actually get translated and routed into
the fake direct handler above, we then turn the response into
the http_client.HTTPResponse that the caller expects.
"""
def __init__(self, host, timeout=None):
self.host = host
def request(self, method, path, data=None, headers=None):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
# NOTE(vish): normally the http transport normalizes from unicode
sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
# NOTE(vish): stop the server from trying to look up address from
# the fake socket
FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1'
self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None)
self.sock = FakeHttplibSocket(sock.result)
self.http_response = http_client.HTTPResponse(self.sock)
def set_debuglevel(self, level):
pass
def getresponse(self):
self.http_response.begin()
return self.http_response
def getresponsebody(self):
return self.sock.result
class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
"""Test case for NetAppISCSIDriver"""
volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'host': 'hostname@backend#vol1'}
snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1',
'volume_size': 2, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1',
'volume_size': 1, 'project_id': 'project'}
volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
volume_clone = {'name': 'cl_sm', 'size': 3, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_sm',
'id': 'lun1', 'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
volume_clone_large = {'name': 'cl_lg', 'size': 6, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_lg',
'id': 'lun1', 'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
connector = {'initiator': 'iqn.1993-08.org.debian:01:10'}
vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'host': 'hostname@backend#vol1'}
vol1 = ssc_cmode.NetAppVolume('lun1', 'openstack')
vol1.state['vserver_root'] = False
vol1.state['status'] = 'online'
vol1.state['junction_active'] = True
vol1.space['size_avl_bytes'] = '4000000000'
vol1.space['size_total_bytes'] = '5000000000'
vol1.space['space-guarantee-enabled'] = False
vol1.space['space-guarantee'] = 'file'
vol1.space['thin_provisioned'] = True
vol1.mirror['mirrored'] = True
vol1.qos['qos_policy_group'] = None
vol1.aggr['name'] = 'aggr1'
vol1.aggr['junction'] = '/vola'
vol1.sis['dedup'] = True
vol1.sis['compression'] = True
vol1.aggr['raid_type'] = 'raiddp'
vol1.aggr['ha_policy'] = 'cfo'
vol1.aggr['disk_type'] = 'SSD'
ssc_map = {'mirrored': set([vol1]), 'dedup': set([vol1]),
'compression': set([vol1]),
'thin': set([vol1]), 'all': set([vol1])}
def setUp(self):
super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.stubs.Set(
ssc_cmode, 'refresh_cluster_ssc',
lambda a, b, c, synchronous: None)
self.mock_object(utils, 'OpenStackInfo')
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([common, client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(http_client, 'HTTPConnection',
FakeDirectCmodeHTTPConnection)
driver.do_setup(context='')
self.driver = driver
self.driver.ssc_vols = self.ssc_map
def _set_config(self, configuration):
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = None
configuration.netapp_vserver = 'openstack'
return configuration
def test_connect(self):
self.driver.library.zapi_client = mock.MagicMock()
self.driver.library.zapi_client.get_ontapi_version.return_value = \
(1, 20)
self.driver.check_for_setup_error()
def test_do_setup_all_default(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_client = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_client.assert_called_with(**FAKE_CONNECTION_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
def test_do_setup_http_default_port(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_client = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_client.assert_called_with(**FAKE_CONNECTION_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
def test_do_setup_https_default_port(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
driver.library._get_root_volume_name = mock.Mock()
mock_client = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONNECTION_HTTPS = dict(FAKE_CONNECTION_HTTP,
transport_type='https')
mock_client.assert_called_with(**FAKE_CONNECTION_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
def test_do_setup_http_non_default_port(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_client = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONNECTION_HTTP_PORT = dict(FAKE_CONNECTION_HTTP, port=81)
mock_client.assert_called_with(**FAKE_CONNECTION_HTTP_PORT)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
def test_do_setup_https_non_default_port(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
driver.library._get_root_volume_name = mock.Mock()
mock_client = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONNECTION_HTTPS_PORT = dict(FAKE_CONNECTION_HTTP, port=446,
transport_type='https')
mock_client.assert_called_with(**FAKE_CONNECTION_HTTPS_PORT)
def test_create_destroy(self):
self.driver.create_volume(self.volume)
self.driver.delete_volume(self.volume)
def test_create_vol_snapshot_destroy(self):
self.driver.create_volume(self.volume)
self.mock_object(client_7mode.Client, '_check_clone_status')
self.mock_object(self.driver.library, '_clone_lun')
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_map_unmap(self):
self.mock_object(client_cmode.Client, 'get_igroup_by_initiators')
self.mock_object(client_cmode.Client, 'get_iscsi_target_details')
self.mock_object(client_cmode.Client, 'get_iscsi_service_details')
self.mock_object(self.driver.library, '_get_or_create_igroup')
self.mock_object(self.driver.library, '_map_lun')
self.mock_object(self.driver.library, '_unmap_lun')
FAKE_PREFERRED_TARGET = {'address': 'http://host:8080', 'port': 80}
FAKE_CONN_PROPERTIES = {'driver_volume_type': 'iscsi', 'data': 'test'}
self.mock_object(self.driver.library,
'_get_preferred_target_from_list',
mock.Mock(return_value=FAKE_PREFERRED_TARGET))
self.mock_object(common.na_utils, 'get_iscsi_connection_properties',
mock.Mock(return_value=FAKE_CONN_PROPERTIES))
self.mock_object(client_cmode.Client,
'get_operational_network_interface_addresses',
mock.Mock(return_value=[]))
self.driver.create_volume(self.volume)
updates = self.driver.create_export(None, self.volume, {})
self.assertTrue(updates['provider_location'])
self.volume['provider_location'] = updates['provider_location']
connection_info = self.driver.initialize_connection(self.volume,
self.connector)
self.assertEqual('iscsi', connection_info['driver_volume_type'])
properties = connection_info['data']
if not properties:
raise AssertionError('Target portal is none')
self.driver.terminate_connection(self.volume, self.connector)
self.driver.delete_volume(self.volume)
def test_cloned_volume_destroy(self):
self.driver.create_volume(self.volume)
self.mock_object(self.driver.library, '_clone_lun')
self.driver.create_cloned_volume(self.snapshot, self.volume)
self.driver.delete_volume(self.snapshot)
self.driver.delete_volume(self.volume)
def test_map_by_creating_igroup(self):
FAKE_IGROUP_INFO = {'initiator-group-name': 'debian',
'initiator-group-os-type': 'linux',
'initiator-group-type': 'igroup'}
FAKE_PREFERRED_TARGET = {'address': 'http://host:8080', 'port': 80}
FAKE_CONN_PROPERTIES = {'driver_volume_type': 'iscsi', 'data': 'test'}
self.mock_object(client_cmode.Client, 'get_igroup_by_initiators',
mock.Mock(return_value=[FAKE_IGROUP_INFO]))
self.mock_object(client_cmode.Client,
'get_operational_network_interface_addresses',
mock.Mock(return_value=[]))
self.mock_object(client_cmode.Client, 'get_iscsi_target_details')
self.mock_object(client_cmode.Client, 'get_iscsi_service_details')
self.mock_object(self.driver.library,
'_get_preferred_target_from_list',
mock.Mock(return_value=FAKE_PREFERRED_TARGET))
self.mock_object(common.na_utils, 'get_iscsi_connection_properties',
mock.Mock(return_value=FAKE_CONN_PROPERTIES))
self.driver.create_volume(self.volume)
updates = self.driver.create_export(None, self.volume, {})
self.assertTrue(updates['provider_location'])
self.volume['provider_location'] = updates['provider_location']
connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'}
connection_info = self.driver.initialize_connection(self.volume,
connector_new)
self.assertEqual('iscsi', connection_info['driver_volume_type'])
properties = connection_info['data']
if not properties:
raise AssertionError('Target portal is none')
def test_vol_stats(self):
self.mock_object(client_base.Client, 'provide_ems')
mock_update_vol_stats = self.mock_object(self.driver.library,
'_update_volume_stats')
self.driver.get_volume_stats(refresh=True)
self.assertEqual(mock_update_vol_stats.call_count, 1)
def test_create_vol_snapshot_diff_size_resize(self):
self.driver.create_volume(self.volume)
self.mock_object(self.driver.library, '_clone_source_to_destination')
self.mock_object(self.driver.library, '_clone_lun')
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(
self.volume_clone, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_create_vol_snapshot_diff_size_subclone(self):
self.driver.create_volume(self.volume)
self.mock_object(self.driver.library, '_clone_lun')
self.mock_object(self.driver.library, '_clone_source_to_destination')
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(
self.volume_clone_large, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_extend_vol_same_size(self):
self.driver.create_volume(self.volume)
self.driver.extend_volume(self.volume, self.volume['size'])
def test_extend_vol_direct_resize(self):
self.mock_object(self.driver.library.zapi_client,
'get_lun_geometry', mock.Mock(return_value=None))
self.mock_object(self.driver.library, '_do_sub_clone_resize')
self.driver.create_volume(self.volume)
self.driver.extend_volume(self.volume, 3)
def test_extend_vol_sub_lun_clone(self):
self.mock_object(self.driver.library.zapi_client,
'get_lun_geometry', mock.Mock(return_value=None))
self.mock_object(self.driver.library, '_do_sub_clone_resize')
self.driver.create_volume(self.volume)
self.driver.extend_volume(self.volume, 4)
class NetAppDriverNegativeTestCase(test.TestCase):
"""Test case for NetAppDriver"""
def setUp(self):
super(NetAppDriverNegativeTestCase, self).setUp()
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([common])
self.mock_object(common.na_utils, 'check_netapp_lib')
def test_incorrect_family(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = create_configuration()
configuration.netapp_storage_family = 'xyz_abc'
try:
common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage family is getting accepted.')
except exception.InvalidInput:
pass
def test_incorrect_protocol(self):
self.mock_object(utils, 'OpenStackInfo')
configuration = create_configuration()
configuration.netapp_storage_family = 'ontap'
configuration.netapp_storage_protocol = 'ontap'
try:
common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage protocol is getting accepted.')
except exception.InvalidInput:
pass
class FakeDirect7MODEServerHandler(FakeHTTPRequestHandler):
"""HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(s):
"""Respond to a GET request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
out = s.wfile
out.write('<netapp version="1.15">'
'<results reason="Not supported method type"'
' status="failed" errno="Not_Allowed"/></netapp>')
def do_POST(s):
"""Respond to a POST request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
s.end_headers()
return
request_xml = s.rfile.read(int(s.headers['Content-Length']))
root = etree.fromstring(request_xml)
body = [x for x in root.iterchildren()]
request = body[0]
tag = request.tag
api = etree.QName(tag).localname or tag
if 'lun-list-info' == api:
body = """<results status="passed">
<are-vols-onlining>false</are-vols-onlining>
<are-vols-busy>false</are-vols-busy>
<luns>
<lun-info>
<path>/vol/vol1/lun1</path>
<size>20971520</size>
<online>true</online>
<mapped>false</mapped>
<read-only>false</read-only>
<staging>false</staging>
<share-state>none</share-state>
<multiprotocol-type>linux</multiprotocol-type>
<uuid>e867d844-c2c0-11e0-9282-00a09825b3b5</uuid>
<serial-number>P3lgP4eTyaNl</serial-number>
<block-size>512</block-size>
<is-space-reservation-enabled>true</is-space-reservation-enabled>
<size-used>0</size-used>
<alignment>indeterminate</alignment>
</lun-info>
<lun-info>
<path>/vol/vol1/lun1</path>
<size>20971520</size>
<online>true</online>
<mapped>false</mapped>
<read-only>false</read-only>
<staging>false</staging>
<share-state>none</share-state>
<multiprotocol-type>linux</multiprotocol-type>
<uuid>8e1e9284-c288-11e0-9282-00a09825b3b5</uuid>
<serial-number>P3lgP4eTc3lp</serial-number>
<block-size>512</block-size>
<is-space-reservation-enabled>true</is-space-reservation-enabled>
<size-used>0</size-used>
<alignment>indeterminate</alignment>
</lun-info>
</luns>
</results>"""
elif 'volume-list-info' == api:
body = """<results status="passed">
<volumes>
<volume-info>
<name>vol0</name>
<uuid>019c8f7a-9243-11e0-9281-00a09825b3b5</uuid>
<type>flex</type>
<block-type>32_bit</block-type>
<state>online</state>
<size-total>576914493440</size-total>
<size-used>13820354560</size-used>
<size-available>563094110208</size-available>
<percentage-used>2</percentage-used>
<snapshot-percent-reserved>20</snapshot-percent-reserved>
<snapshot-blocks-reserved>140848264</snapshot-blocks-reserved>
<reserve-required>0</reserve-required>
<reserve>0</reserve>
<reserve-used>0</reserve-used>
<reserve-used-actual>0</reserve-used-actual>
<files-total>20907162</files-total>
<files-used>7010</files-used>
<files-private-used>518</files-private-used>
<inodefile-public-capacity>31142</inodefile-public-capacity>
<inodefile-private-capacity>31142</inodefile-private-capacity>
<quota-init>0</quota-init>
<is-snaplock>false</is-snaplock>
<containing-aggregate>aggr0</containing-aggregate>
<sis>
<sis-info>
<state>disabled</state>
<status>idle</status>
<progress>idle for 70:36:44</progress>
<type>regular</type>
<schedule>sun-sat@0</schedule>
<last-operation-begin>Mon Aug 8 09:34:15 EST 2011
</last-operation-begin>
<last-operation-end>Mon Aug 8 09:34:15 EST 2011
</last-operation-end>
<last-operation-size>0</last-operation-size>
<size-shared>0</size-shared>
<size-saved>0</size-saved>
<percentage-saved>0</percentage-saved>
<compress-saved>0</compress-saved>
<percent-compress-saved>0</percent-compress-saved>
<dedup-saved>0</dedup-saved>
<percent-dedup-saved>0</percent-dedup-saved>
<total-saved>0</total-saved>
<percent-total-saved>0</percent-total-saved>
</sis-info>
</sis>
<compression-info>
<is-compression-enabled>false</is-compression-enabled>
</compression-info>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<raid-size>14</raid-size>
<raid-status>raid_dp,sis</raid-status>
<checksum-style>block</checksum-style>
<is-checksum-enabled>true</is-checksum-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
<is-in-snapmirror-jumpahead>false</is-in-snapmirror-jumpahead>
<mirror-status>unmirrored</mirror-status>
<disk-count>3</disk-count>
<plex-count>1</plex-count>
<plexes>
<plex-info>
<name>/aggr0/plex0</name>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
</plex-info>
</plexes>
</volume-info>
<volume-info>
<name>vol1</name>
<uuid>2d50ecf4-c288-11e0-9282-00a09825b3b5</uuid>
<type>flex</type>
<block-type>32_bit</block-type>
<state>online</state>
<size-total>42949672960</size-total>
<size-used>44089344</size-used>
<size-available>42905583616</size-available>
<percentage-used>0</percentage-used>
<snapshot-percent-reserved>20</snapshot-percent-reserved>
<snapshot-blocks-reserved>10485760</snapshot-blocks-reserved>
<reserve-required>8192</reserve-required>
<reserve>8192</reserve>
<reserve-used>0</reserve-used>
<reserve-used-actual>0</reserve-used-actual>
<files-total>1556480</files-total>
<files-used>110</files-used>
<files-private-used>504</files-private-used>
<inodefile-public-capacity>31142</inodefile-public-capacity>
<inodefile-private-capacity>31142</inodefile-private-capacity>
<quota-init>0</quota-init>
<is-snaplock>false</is-snaplock>
<containing-aggregate>aggr1</containing-aggregate>
<sis>
<sis-info>
<state>disabled</state>
<status>idle</status>
<progress>idle for 89:19:59</progress>
<type>regular</type>
<schedule>sun-sat@0</schedule>
<last-operation-begin>Sun Aug 7 14:51:00 EST 2011
</last-operation-begin>
<last-operation-end>Sun Aug 7 14:51:00 EST 2011
</last-operation-end>
<last-operation-size>0</last-operation-size>
<size-shared>0</size-shared>
<size-saved>0</size-saved>
<percentage-saved>0</percentage-saved>
<compress-saved>0</compress-saved>
<percent-compress-saved>0</percent-compress-saved>
<dedup-saved>0</dedup-saved>
<percent-dedup-saved>0</percent-dedup-saved>
<total-saved>0</total-saved>
<percent-total-saved>0</percent-total-saved>
</sis-info>
</sis>
<compression-info>
<is-compression-enabled>false</is-compression-enabled>
</compression-info>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<raid-size>7</raid-size>
<raid-status>raid4,sis</raid-status>
<checksum-style>block</checksum-style>
<is-checksum-enabled>true</is-checksum-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
<is-in-snapmirror-jumpahead>false</is-in-snapmirror-jumpahead>
<mirror-status>unmirrored</mirror-status>
<disk-count>2</disk-count>
<plex-count>1</plex-count>
<plexes>
<plex-info>
<name>/aggr1/plex0</name>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
</plex-info>
</plexes>
</volume-info>
</volumes>
</results>"""
elif 'volume-options-list-info' == api:
body = """<results status="passed">
<options>
<volume-option-info>
<name>snapmirrored</name>
<value>off</value>
</volume-option-info>
<volume-option-info>
<name>root</name>
<value>false</value>
</volume-option-info>
<volume-option-info>
<name>ha_policy</name>
<value>cfo</value>
</volume-option-info>
<volume-option-info>
<name>striping</name>
<value>not_striped</value>
</volume-option-info>
<volume-option-info>
<name>compression</name>
<value>off</value>
</volume-option-info>
</options>
</results>"""
elif 'lun-create-by-size' == api:
body = """<results status="passed">
<actual-size>22020096</actual-size></results>"""
elif 'lun-destroy' == api:
body = """<results status="passed"/>"""
elif 'igroup-list-info' == api:
body = """<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>openstack-8bc96490</initiator-group-name>
<initiator-group-type>iscsi</initiator-group-type>
<initiator-group-uuid>b8e1d274-c378-11e0</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>false</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info>
<initiator-group-info>
<initiator-group-name>iscsi_group</initiator-group-name>
<initiator-group-type>iscsi</initiator-group-type>
<initiator-group-uuid>ccb8cbe4-c36f</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>false</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10ca</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info>
</initiator-groups>
</results>"""
elif 'lun-map-list-info' == api:
body = """<results status="passed">
<initiator-groups/>
</results>"""
elif 'lun-map' == api:
body = """<results status="passed"><lun-id-assigned>1
</lun-id-assigned>
</results>"""
elif 'iscsi-node-get-name' == api:
body = """<results status="passed">
<node-name>iqn.1992-08.com.netapp:sn.135093938</node-name>
</results>"""
elif 'iscsi-portal-list-info' == api:
body = """<results status="passed">
<iscsi-portal-list-entries>
<iscsi-portal-list-entry-info>
<ip-address>10.61.176.156</ip-address>
<ip-port>3260</ip-port>
<tpgroup-tag>1000</tpgroup-tag>
<interface-name>e0a</interface-name>
</iscsi-portal-list-entry-info>
</iscsi-portal-list-entries>
</results>"""
elif 'igroup-create' == api:
body = """<results status="passed"/>"""
elif 'igroup-add' == api:
body = """<results status="passed"/>"""
elif 'clone-start' == api:
body = """<results status="passed">
<clone-id>
<clone-id-info>
<volume-uuid>2d50ecf4-c288-11e0-9282-00a09825b3b5</volume-uuid>
<clone-op-id>11</clone-op-id>
</clone-id-info>
</clone-id>
</results>"""
elif 'clone-list-status' == api:
body = """<results status="passed">
<status>
<ops-info>
<clone-state>completed</clone-state>
</ops-info>
</status>
</results>"""
elif 'lun-unmap' == api:
body = """<results status="passed"/>"""
elif 'system-get-ontapi-version' == api:
body = """<results status="passed">
<major-version>1</major-version>
<minor-version>8</minor-version>
</results>"""
elif 'lun-set-space-reservation-info' == api:
body = """<results status="passed"/>"""
elif 'ems-autosupport-log' == api:
body = """<results status="passed"/>"""
elif 'lun-resize' == api:
body = """<results status="passed"/>"""
elif 'lun-get-geometry' == api:
body = """<results status="passed">
<size>1</size>
<bytes-per-sector>2</bytes-per-sector>
<sectors-per-track>8</sectors-per-track>
<tracks-per-cylinder>2</tracks-per-cylinder>
<cylinders>4</cylinders>
<max-resize-size>5</max-resize-size>
</results>"""
elif 'volume-options-list-info' == api:
body = """<results status="passed">
<options>
<option>
<name>compression</name>
<value>off</value>
</option>
</options>
</results>"""
elif 'lun-move' == api:
body = """<results status="passed"/>"""
else:
# Unknown API
s.send_response(500)
s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
s.wfile.write(RESPONSE_PREFIX_DIRECT_7MODE)
s.wfile.write(RESPONSE_PREFIX_DIRECT)
s.wfile.write(body)
s.wfile.write(RESPONSE_SUFFIX_DIRECT)
class FakeDirect7modeHTTPConnection(object):
"""A fake http_client.HTTPConnection for netapp tests
Requests made via this connection actually get translated and routed into
the fake direct handler above; we then turn the response into
the http_client.HTTPResponse that the caller expects.
"""
def __init__(self, host, timeout=None):
self.host = host
def request(self, method, path, data=None, headers=None):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
# NOTE(vish): normally the http transport normalizes from unicode
sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
# NOTE(vish): stop the server from trying to look up address from
# the fake socket
FakeDirect7MODEServerHandler.address_string = lambda x: '127.0.0.1'
self.app = FakeDirect7MODEServerHandler(sock, '127.0.0.1:80', None)
self.sock = FakeHttplibSocket(sock.result)
self.http_response = http_client.HTTPResponse(self.sock)
def set_debuglevel(self, level):
pass
def getresponse(self):
self.http_response.begin()
return self.http_response
def getresponsebody(self):
return self.sock.result
class NetAppDirect7modeISCSIDriverTestCase_NV(test.TestCase):
"""Test case for NetAppISCSIDriver without vfiler"""
volume = {
'name': 'lun1',
'size': 2,
'volume_name': 'lun1',
'os_type': 'linux',
'provider_location': 'lun1',
'id': 'lun1',
'provider_auth': None,
'project_id': 'project',
'display_name': None,
'display_description': 'lun1',
'volume_type_id': None,
'host': 'hostname@backend#vol1',
}
def setUp(self):
super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([common, client_base, client_7mode])
self.mock_object(common.na_utils, 'check_netapp_lib')
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(http_client, 'HTTPConnection',
FakeDirect7modeHTTPConnection)
self.mock_object(driver.library, '_get_root_volume_name', mock.Mock(
return_value='root'))
driver.do_setup(context='')
driver.root_volume_name = 'root'
self.driver = driver
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = None
return configuration
def test_create_on_select_vol(self):
self.driver.volume_list = ['vol0', 'vol1']
self.driver.create_volume(self.volume)
self.driver.delete_volume(self.volume)
self.driver.volume_list = []
def test_connect(self):
self.driver.library.zapi_client = mock.MagicMock()
self.driver.library.zapi_client.get_ontapi_version.\
return_value = (1, 20)
self.driver.check_for_setup_error()
def test_check_for_setup_error_version(self):
drv = self.driver
self.mock_object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=None))
# check that an exception is raised when the version is not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
self.mock_object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 8)))
# check that an exception is raised for an unsupported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
class NetAppDirect7modeISCSIDriverTestCase_WV(
NetAppDirect7modeISCSIDriverTestCase_NV):
"""Test case for NetAppISCSIDriver with vfiler"""
def setUp(self):
super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([common, client_base, client_7mode])
self.mock_object(common.na_utils, 'check_netapp_lib')
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(http_client, 'HTTPConnection',
FakeDirect7modeHTTPConnection)
self.mock_object(driver.library, '_get_root_volume_name',
mock.Mock(return_value='root'))
driver.do_setup(context='')
self.driver = driver
self.driver.root_volume_name = 'root'
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = None
configuration.netapp_vfiler = 'openstack'
return configuration
|
apache-2.0
|
cuemacro/findatapy
|
findatapy_examples/equitiesdata_example.py
|
1
|
2756
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
if __name__ == '__main__':
###### the line below is CRUCIAL when running on Windows, otherwise multiprocessing doesn't work! (not necessary on Linux)
from findatapy.util import SwimPool; SwimPool()
from findatapy.market import Market, MarketDataRequest, MarketDataGenerator
market = Market(market_data_generator=MarketDataGenerator())
# in the config file, we can use keywords 'open', 'high', 'low', 'close' and 'volume' for Yahoo and Google finance data
# download equities data from Alpha Vantage
md_request = MarketDataRequest(
start_date="decade", # start date
data_source='alphavantage', # use Alpha Vantage as data source
tickers=['Apple', 'Microsoft', 'Citigroup'], # ticker (findatapy)
fields=['close'], # which fields to download
vendor_tickers=['aapl', 'msft', 'c'], # ticker (Alpha Vantage)
vendor_fields=['Close']) # which Alpha Vantage fields to download
df = market.fetch_market(md_request)
print(df.tail(n=10))
# NOTE: uses yfinance for Yahoo API
# download equities data from Yahoo
md_request = MarketDataRequest(
start_date="decade", # start date
data_source='yahoo', # use Yahoo as data source
tickers=['Apple', 'Citigroup'], # ticker (findatapy)
fields=['close'], # which fields to download
vendor_tickers=['aapl', 'c'], # ticker (Yahoo)
vendor_fields=['Close']) # which Yahoo fields to download
df = market.fetch_market(md_request)
print(df.tail(n=10))
# download equities and ETF data from Yahoo
md_request = MarketDataRequest(
start_date="decade", # start date
data_source='yahoo', # use Yahoo as data source
tickers=['Apple', 'S&P500-ETF'], # ticker (findatapy)
fields=['close'], # which fields to download
vendor_tickers=['aapl', 'spy'], # ticker (Yahoo)
vendor_fields=['Close']) # which Yahoo fields to download
df = market.fetch_market(md_request)
print(df.tail(n=10))
|
apache-2.0
|
Yawning/obfsproxy-wfpadtools
|
obfsproxy/managed/server.py
|
3
|
5001
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from twisted.internet import reactor, error
from pyptlib.server import ServerTransportPlugin
from pyptlib.config import EnvError
import obfsproxy.transports.transports as transports
import obfsproxy.network.launch_transport as launch_transport
import obfsproxy.common.log as logging
import obfsproxy.common.transport_config as transport_config
import pprint
log = logging.get_obfslogger()
def do_managed_server():
"""Start the managed-proxy protocol as a server."""
should_start_event_loop = False
ptserver = ServerTransportPlugin()
try:
ptserver.init(transports.transports.keys())
except EnvError, err:
log.warning("Server managed-proxy protocol failed (%s)." % err)
return
log.debug("pyptlib gave us the following data:\n'%s'", pprint.pformat(ptserver.getDebugData()))
ext_orport = ptserver.config.getExtendedORPort()
authcookie = ptserver.config.getAuthCookieFile()
orport = ptserver.config.getORPort()
server_transport_options = ptserver.config.getServerTransportOptions()
for transport, transport_bindaddr in ptserver.getBindAddresses().items():
# Will hold configuration parameters for the pluggable transport module.
pt_config = transport_config.TransportConfig()
pt_config.setStateLocation(ptserver.config.getStateLocation())
if ext_orport:
pt_config.setListenerMode("ext_server")
else:
pt_config.setListenerMode("server")
pt_config.setObfsproxyMode("managed")
transport_options = ""
if server_transport_options and transport in server_transport_options:
transport_options = server_transport_options[transport]
pt_config.setServerTransportOptions(transport_options)
# Call the setup() method for this transport.
transport_class = transports.get_transport_class(transport, 'server')
transport_class.setup(pt_config)
try:
if ext_orport:
addrport = launch_transport.launch_transport_listener(transport,
transport_bindaddr,
'ext_server',
ext_orport,
pt_config,
ext_or_cookie_file=authcookie)
else:
addrport = launch_transport.launch_transport_listener(transport,
transport_bindaddr,
'server',
orport,
pt_config)
except transports.TransportNotFound:
log.warning("Could not find transport '%s'" % transport)
ptserver.reportMethodError(transport, "Could not find transport.")
continue
except error.CannotListenError, e:
error_msg = "Could not set up listener (%s:%s) for '%s' (%s)." % \
(e.interface, e.port, transport, e.socketError[1])
log.warning(error_msg)
ptserver.reportMethodError(transport, error_msg)
continue
should_start_event_loop = True
extra_log = "" # Include server transport options in the log message if we got 'em
if transport_options:
extra_log = " (server transport options: '%s')" % str(transport_options)
log.debug("Successfully launched '%s' at '%s'%s" % (transport, log.safe_addr_str(str(addrport)), extra_log))
# Invoke the transport-specific get_public_server_options()
# method to potentially filter the server transport options
# that should be passed on to Tor and eventually to BridgeDB.
public_options_dict = transport_class.get_public_server_options(transport_options)
public_options_str = None
# If the transport filtered its options:
if public_options_dict:
optlist = []
for k, v in public_options_dict.items():
optlist.append("%s=%s" % (k,v))
public_options_str = ",".join(optlist)
log.debug("do_managed_server: sending only public_options to tor: %s" % public_options_str)
# Report success for this transport.
# If public_options_str is None then all of the
# transport options from ptserver are used instead.
ptserver.reportMethodSuccess(transport, addrport, public_options_str)
ptserver.reportMethodsEnd()
if should_start_event_loop:
log.info("Starting up the event loop.")
reactor.run()
else:
log.info("No transports launched. Nothing to do.")
|
bsd-3-clause
|
angelapper/edx-platform
|
lms/djangoapps/notes/models.py
|
6
|
3226
|
import json
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.html import strip_tags
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
class Note(models.Model):
user = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
uri = models.CharField(max_length=255, db_index=True)
text = models.TextField(default="")
quote = models.TextField(default="")
range_start = models.CharField(max_length=2048) # xpath string
range_start_offset = models.IntegerField()
range_end = models.CharField(max_length=2048) # xpath string
range_end_offset = models.IntegerField()
tags = models.TextField(default="") # comma-separated string
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
class Meta:
app_label = 'notes'
def clean(self, json_body):
"""
Cleans the note object or raises a ValidationError.
"""
if json_body is None:
raise ValidationError('Note must have a body.')
body = json.loads(json_body)
if not isinstance(body, dict):
raise ValidationError('Note body must be a dictionary.')
# NOTE: all three of these fields should be considered user input
# and may be output back to the user, so we need to sanitize them.
# These fields should only contain _plain text_.
self.uri = strip_tags(body.get('uri', ''))
self.text = strip_tags(body.get('text', ''))
self.quote = strip_tags(body.get('quote', ''))
ranges = body.get('ranges')
if ranges is None or len(ranges) != 1:
raise ValidationError('Note must contain exactly one range.')
self.range_start = ranges[0]['start']
self.range_start_offset = ranges[0]['startOffset']
self.range_end = ranges[0]['end']
self.range_end_offset = ranges[0]['endOffset']
self.tags = ""
tags = [strip_tags(tag) for tag in body.get('tags', [])]
if len(tags) > 0:
self.tags = ",".join(tags)
def get_absolute_url(self):
"""
Returns the absolute url for the note object.
"""
# pylint: disable=no-member
kwargs = {'course_id': self.course_id.to_deprecated_string(), 'note_id': str(self.pk)}
return reverse('notes_api_note', kwargs=kwargs)
def as_dict(self):
"""
Returns the note object as a dictionary.
"""
return {
'id': self.pk,
'user_id': self.user.pk,
'uri': self.uri,
'text': self.text,
'quote': self.quote,
'ranges': [{
'start': self.range_start,
'startOffset': self.range_start_offset,
'end': self.range_end,
'endOffset': self.range_end_offset
}],
'tags': self.tags.split(","),
'created': str(self.created),
'updated': str(self.updated)
}
|
agpl-3.0
|
hammerlab/varlens
|
varlens/util.py
|
2
|
2981
|
# Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
def expand(value, arg_name, input_name, length):
if value is None or len(value) == 0:
return [None] * length
if len(value) == length:
return value
if len(value) == 1:
return value * length
if length == 1:
raise ValueError(
"With only 1 {input_name} specified, each {arg_name} argument "
"should be length 1. If you are trying to specify multiple filters"
" to apply consecutively, you should specify the entire argument "
"multiple times."
.format(
arg_name=arg_name,
input_name=input_name,
length=length,
actual=len(value)))
else:
raise ValueError(
"Expected argument {arg_name} to be length 1 (i.e. apply to all "
"{input_name} inputs) or length {length} (i.e. an individual value"
" for each of the {length} {input_name} inputs), not {actual}."
.format(
arg_name=arg_name,
input_name=input_name,
length=length,
actual=len(value)))
def drop_prefix(strings):
"""
Removes common prefix from a collection of strings
"""
strings_without_extensions = [
s.split(".", 2)[0] for s in strings
]
if len(strings_without_extensions) == 1:
return [os.path.basename(strings_without_extensions[0])]
prefix_len = len(os.path.commonprefix(strings_without_extensions))
result = [string[prefix_len:] for string in strings_without_extensions]
if len(set(result)) != len(strings):
# If these operations resulted in a collision, just return the original
# strings.
return strings
return result
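# Illustrative behaviour (example inputs are assumptions; results follow from
# the function above):
#   drop_prefix(["sample1.bam", "sample2.bam"])   -> ["1", "2"]
#   drop_prefix(["/data/sample1.bam"])            -> ["sample1"]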
class PrefixedArgumentParser(object):
def __init__(self, wrapped, prefix):
self.wrapped = wrapped
self.prefix = prefix
def add_argument(self, name, *args, **kwargs):
assert name.startswith("--")
new_name = "--" + self.prefix + "-" + name[2:]
self.wrapped.add_argument(new_name, *args, **kwargs)
def remove_prefix_from_parsed_args(args, prefix):
result = argparse.Namespace()
for (arg, value) in args._get_kwargs():
if arg.startswith(prefix + "_"):
setattr(result, arg[len(prefix + "_"):], value)
return result
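# Illustrative usage of the two helpers above (option names are assumptions):
# register prefixed options once, then strip the prefix from the parsed
# namespace for downstream code.
#   parser = argparse.ArgumentParser()
#   reads_parser = PrefixedArgumentParser(parser, "reads")
#   reads_parser.add_argument("--filter")        # exposed as --reads-filter
#   args = parser.parse_args(["--reads-filter", "mapq>=30"])
#   reads_args = remove_prefix_from_parsed_args(args, "reads")
#   assert reads_args.filter == "mapq>=30"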
|
apache-2.0
|
EricSchles/regulations-parser
|
tests/history_delays_tests.py
|
4
|
3576
|
from unittest import TestCase
from regparser.history.delays import *
class HistoryDelaysTests(TestCase):
def test_modify_effective_dates(self):
outdated = {'document_number': 'outdated',
'effective_on': '2001-01-01',
'publication_date': '2000-12-12',
'fr_volume': 12,
'meta': {'start_page': 500,
'end_page': 600,
'type': 'Rule',
'dates': 'Has an effective date of January 1, '
+ '2001'}}
unaltered = {'document_number': 'unaltered',
'effective_on': '2001-01-01',
'publication_date': '2000-12-20',
'fr_volume': 12,
'meta': {'start_page': 800,
'end_page': 900,
'type': 'Rule',
'dates': 'Effective date of January 1, 2001'}}
proposal = {'document_number': 'proposal',
'publication_date': '2000-12-21',
'fr_volume': 12,
'meta': {
'start_page': 1100,
'end_page': 1200,
'type': 'Proposed Rule',
'dates': 'We are thinking about delaying the '
+ 'effective date of 12 FR 501 to March 3, '
+ '2003'}}
changer = {'document_number': 'changer',
'publication_date': '2000-12-31',
'effective_on': '2000-12-31',
'fr_volume': 12,
'meta': {'start_page': 9000,
'end_page': 9005,
'type': 'Rule',
'dates': 'The effective date of 12 FR 501 has ' +
'been delayed until March 3, 2003'}}
modify_effective_dates([outdated, unaltered, proposal, changer])
self.assertEqual('2003-03-03', outdated['effective_on'])
self.assertEqual('2001-01-01', unaltered['effective_on'])
self.assertFalse('effective_on' in proposal)
self.assertEqual('2000-12-31', changer['effective_on'])
def test_overlaps_with(self):
fr = Notice(10, 225)
meta = lambda v, s, e: {'fr_volume': v, 'meta': {'start_page': s,
'end_page': e}}
self.assertTrue(overlaps_with(fr, meta(10, 220, 230)))
self.assertTrue(overlaps_with(fr, meta(10, 225, 230)))
self.assertTrue(overlaps_with(fr, meta(10, 220, 225)))
self.assertFalse(overlaps_with(fr, meta(11, 220, 230)))
self.assertFalse(overlaps_with(fr, meta(10, 226, 230)))
self.assertFalse(overlaps_with(fr, meta(10, 220, 224)))
def test_altered_frs(self):
sent = "The effective date of 12 FR 501, 13 FR 999, and (13 FR 764) "
sent += "has been delayed."
self.assertEqual(altered_frs(sent),
([Notice(12, 501), Notice(13, 999), Notice(13, 764)],
None))
sent = "In 11 FR 123 we delayed the effective date"
self.assertEqual(altered_frs(sent), ([], None))
sent = "The effective date of 9 FR 765 has been delayed until "
sent += "January 7, 2008; rather I mean March 4 2008"
self.assertEqual(altered_frs(sent),
([Notice(9, 765)], date(2008, 3, 4)))
|
cc0-1.0
|
LIMXTEC/BitSend
|
contrib/linearize/linearize.py
|
3
|
3351
|
#!/usr/bin/python
#
# linearize.py: Construct a linear, no-fork, best version of the blockchain.
#
#
# Copyright (c) 2013 The Bitsend developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
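# Example CONFIG-FILE contents (illustrative values): rpcuser and rpcpassword
# are required, the other keys fall back to the defaults set in __main__ below.
#
#   rpcuser=someuser
#   rpcpassword=somepassword
#   host=127.0.0.1
#   port=8800
#   netmagic=f9beb4d9
#   output=bootstrap.dat
#   min_height=0
#   max_height=279000
#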
import json
import struct
import re
import base64
import httplib
import sys
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
class BitsendRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def getblock(rpc, settings, n):
hash = rpc.getblockhash(n)
hexdata = rpc.getblock(hash, False)
data = hexdata.decode('hex')
return data
def get_blocks(settings):
rpc = BitsendRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
outf = open(settings['output'], 'ab')
for height in xrange(settings['min_height'], settings['max_height']+1):
data = getblock(rpc, settings, height)
outhdr = settings['netmagic']
outhdr += struct.pack("<i", len(data))
outf.write(outhdr)
outf.write(data)
if (height % 1000) == 0:
sys.stdout.write("Wrote block " + str(height) + "\n")
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'output' not in settings:
settings['output'] = 'bootstrap.dat'
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8800
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 279000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_blocks(settings)
|
mit
|
JayVora-SerpentCS/sale-reporting
|
__unported__/sale_note_flow/sale.py
|
5
|
1868
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 Camptocamp SA (http://www.camptocamp.com)
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class SaleOrder(orm.Model):
_inherit = "sale.order"
def _prepare_invoice(self, cr, uid, order, lines, context=None):
res = super(SaleOrder, self)._prepare_invoice(cr, uid, order,
lines, context=context)
res.update({'note1': order.note1, 'note2': order.note2})
return res
class SaleOrderline(orm.Model):
_inherit = "sale.order.line"
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
res = super(SaleOrderline, self)._prepare_order_line_invoice_line(cr, uid, line,
account_id=account_id,
context=context)
res.update({'formatted_note': line.formatted_note})
return res
|
agpl-3.0
|
sbalde/edx-platform
|
lms/djangoapps/branding/models.py
|
84
|
1551
|
"""
Model used by Video module for Branding configuration.
Includes:
BrandingInfoConfig: A ConfigurationModel for managing how Video Module will
use Branding.
"""
import json
from django.db.models import TextField
from django.core.exceptions import ValidationError
from config_models.models import ConfigurationModel
class BrandingInfoConfig(ConfigurationModel):
"""
Configuration for Branding.
Example of configuration that must be stored:
{
"CN": {
"url": "http://www.xuetangx.com",
"logo_src": "http://www.xuetangx.com/static/images/logo.png",
"logo_tag": "Video hosted by XuetangX.com"
}
}
"""
configuration = TextField(
help_text="JSON data of Configuration for Video Branding."
)
def clean(self):
"""
Validates configuration text field.
"""
try:
json.loads(self.configuration)
except ValueError:
raise ValidationError('Must be valid JSON string.')
@classmethod
def get_config(cls):
"""
Get the Video Branding Configuration.
"""
info = cls.current()
return json.loads(info.configuration) if info.enabled else {}
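# Illustrative consumer (assumed caller, not part of this model): look up the
# branding entry for a country code from the stored configuration, e.g.
#   branding = BrandingInfoConfig.get_config().get("CN", {})
#   logo_src = branding.get("logo_src")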
class BrandingApiConfig(ConfigurationModel):
"""Configure Branding api's
Enable or disable api's functionality.
When this flag is disabled, the api will return 404.
When the flag is enabled, the api will returns the valid reponse.
"""
pass
|
agpl-3.0
|
noroutine/ansible
|
lib/ansible/modules/packaging/language/composer.py
|
7
|
8773
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: composer
author:
- "Dimitrios Tydeas Mengidis (@dmtrs)"
- "René Moser (@resmo)"
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- >
Composer is a tool for dependency management in PHP. It allows you to
declare the dependent libraries your project needs and it will install
them in your project for you.
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on.
default: install
arguments:
version_added: "2.0"
description:
- Composer arguments like required package, version and so on.
executable:
version_added: "2.4"
description:
- Path to PHP Executable on the remote host, if PHP is not in PATH.
aliases: [ php_path ]
working_dir:
description:
- Directory of your project (see --working-dir). This is required when
the command is not run globally.
- Will be ignored if C(global_command=true).
aliases: [ working-dir ]
global_command:
version_added: "2.4"
description:
- Runs the specified command globally.
choices: [ true, false]
default: false
aliases: [ global-command ]
prefer_source:
description:
- Forces installation from package sources when possible (see --prefer-source).
default: false
choices: [ true, false]
aliases: [ prefer-source ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions (see --prefer-dist).
default: false
choices: [ true, false]
aliases: [ prefer-dist ]
no_dev:
description:
- Disables installation of require-dev packages (see --no-dev).
default: true
choices: [ true, false]
aliases: [ no-dev ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
default: false
choices: [ true, false]
aliases: [ no-scripts ]
no_plugins:
description:
- Disables all plugins (see --no-plugins).
default: false
choices: [ true, false]
aliases: [ no-plugins ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
- Recommended especially for production, but can take a bit of time to run.
default: true
choices: [ true, false]
aliases: [ optimize-autoloader ]
ignore_platform_reqs:
version_added: "2.0"
description:
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
default: false
choices: [ true, false]
aliases: [ ignore-platform-reqs ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer:
command: install
working_dir: /path/to/project
- composer:
command: require
arguments: my/package
working_dir: /path/to/project
# Clone project and install with all dependencies
- composer:
command: create-project
arguments: package/package /path/to/project ~1.0
working_dir: /path/to/project
prefer_dist: yes
# Installs package globally
- composer:
command: require
global_command: yes
arguments: my/package
'''
import re
from ansible.module_utils.basic import AnsibleModule
def parse_out(string):
return re.sub(r"\s+", " ", string).strip()
def has_changed(string):
return "Nothing to install or update" not in string
def get_available_options(module, command='install'):
# get all available options from a composer command using composer help to json
rc, out, err = composer_command(module, "help %s --format=json" % command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
command_help_json = module.from_json(out)
return command_help_json['definition']['options']
def composer_command(module, command, arguments="", options=None, global_command=False):
if options is None:
options = []
if module.params['executable'] is None:
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
else:
php_path = module.params['executable']
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(default="install", type="str"),
arguments=dict(default="", type="str"),
executable=dict(type="path", aliases=["php_path"]),
working_dir=dict(type="path", aliases=["working-dir"]),
global_command=dict(default=False, type="bool", aliases=["global-command"]),
prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
),
required_if=[('global_command', False, ['working_dir'])],
supports_check_mode=True
)
# Get composer command with fallback to default
command = module.params['command']
if re.search(r"\s", command):
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
global_command = module.params['global_command']
available_options = get_available_options(module=module, command=command)
options = []
# Default options
default_options = [
'no-ansi',
'no-interaction',
'no-progress',
]
for option in default_options:
if option in available_options:
option = "--%s" % option
options.append(option)
if not global_command:
options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
'no_dev': 'no-dev',
'no_scripts': 'no-scripts',
'no_plugins': 'no-plugins',
'optimize_autoloader': 'optimize-autoloader',
'ignore_platform_reqs': 'ignore-platform-reqs',
}
for param, option in option_params.items():
if module.params.get(param) and option in available_options:
option = "--%s" % option
options.append(option)
if module.check_mode:
if 'dry-run' in available_options:
options.append('--dry-run')
else:
module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
rc, out, err = composer_command(module, command, arguments, options, global_command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output, stdout=err)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
if __name__ == '__main__':
main()
|
gpl-3.0
|
brion/ffmpeg2theora
|
frontend/theoraenc/addVideoDialog.py
|
3
|
15190
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=2:sts=2:ts=2
import os
from os.path import basename
import time
from addSubtitlesDialog import addSubtitlesPropertiesDialog, SubtitlesList
import wx
import theoraenc
class AddVideoDialog(wx.Dialog):
def __init__(
self, parent, ID, title, hasKate, hasIconv,
size=wx.DefaultSize, pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE,
):
self.videoFile = ''
self.hasKate = hasKate
self.hasIconv = hasIconv
pre = wx.PreDialog()
#pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, ID, title, pos, size, style)
self.PostCreate(pre)
# Now continue with the normal construction of the dialog
padding = 4
section_padding=60
mainBox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((8, 8))
#Video File
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.Add(wx.StaticText(self, -1, "Video File"), 0, wx.EXPAND|wx.ALL, 16)
self.btnVideoFile = wx.Button(self, size=(380, -1))
self.btnVideoFile.SetLabel('Select...')
self.Bind(wx.EVT_BUTTON, self.OnClickVideoFile, self.btnVideoFile)
hbox.Add(self.btnVideoFile, 0, wx.EXPAND|wx.ALL, padding)
#Quality
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
label = wx.StaticText(self, -1, "Video")
hbox.AddSpacer((12, 10))
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((section_padding, 10))
label = wx.StaticText(self, -1, "Quality:")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.videoquality = wx.TextCtrl(self, -1, '5.0', size=(32,-1))
hbox.Add(self.videoquality, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Bitrate (kbps):")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.videobitrate = wx.TextCtrl(self, -1, '', size=(65,-1))
hbox.Add(self.videobitrate, 0, wx.EXPAND|wx.ALL, padding)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((section_padding, 10))
#Size
box=45
label = wx.StaticText(self, -1, "Size:")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.width = wx.TextCtrl(self, -1, '', size=(65,-1))
hbox.Add(self.width, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "x")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.height = wx.TextCtrl(self, -1, '', size=(65,-1))
hbox.Add(self.height, 0, wx.EXPAND|wx.ALL, padding)
#Framerate
label = wx.StaticText(self, -1, "Framerate:")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.framerate = wx.TextCtrl(self, -1, '', size=(40,-1))
hbox.Add(self.framerate, 0, wx.EXPAND|wx.ALL, padding)
#Crop
box=35
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((section_padding, 10))
label = wx.StaticText(self, -1, "Crop:")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Top")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.cropTop = wx.TextCtrl(self, -1, '', size=(box,-1))
hbox.Add(self.cropTop, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Left")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.cropLeft = wx.TextCtrl(self, -1, '', size=(box,-1))
hbox.Add(self.cropLeft, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Bottom")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.cropBottom = wx.TextCtrl(self, -1, '', size=(box,-1))
hbox.Add(self.cropBottom, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Right")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.cropRight = wx.TextCtrl(self, -1, '', size=(box,-1))
hbox.Add(self.cropRight, 0, wx.EXPAND|wx.ALL, padding)
box=45
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
label = wx.StaticText(self, -1, "Audio")
hbox.AddSpacer((12, 10))
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
#Quality & Bitrate
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((section_padding, 10))
label = wx.StaticText(self, -1, "Quality:")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.audioquality = wx.TextCtrl(self, -1, '1.0', size=(32,-1))
hbox.Add(self.audioquality, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Bitrate (kbps):")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.audiobitrate = wx.TextCtrl(self, -1, '', size=(box,-1))
hbox.Add(self.audiobitrate, 0, wx.EXPAND|wx.ALL, padding)
#Samplerate / Channels
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((section_padding, 10))
label = wx.StaticText(self, -1, "Samplerate (Hz)")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.samplerate = wx.TextCtrl(self, -1, '', size=(56,-1))
hbox.Add(self.samplerate, 0, wx.EXPAND|wx.ALL, padding)
label = wx.StaticText(self, -1, "Channels")
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.channels = wx.TextCtrl(self, -1, '', size=(24,-1))
hbox.Add(self.channels, 0, wx.EXPAND|wx.ALL, padding)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
# subtitles ('add' button and list)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
label = wx.StaticText(self, -1, "Subtitles")
hbox.AddSpacer((12, 10))
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((section_padding, 10))
if hasKate:
vbox = wx.BoxSizer(wx.VERTICAL)
hbox.Add(vbox)
subtitlesButtons_hbox = wx.BoxSizer(wx.HORIZONTAL)
vbox.Add(subtitlesButtons_hbox)
self.btnSubtitlesAdd = wx.Button(self, size=(120, -1))
self.btnSubtitlesAdd.SetLabel('Add...')
self.Bind(wx.EVT_BUTTON, self.OnClickSubtitlesAdd, self.btnSubtitlesAdd)
subtitlesButtons_hbox.Add(self.btnSubtitlesAdd, 0, wx.EXPAND|wx.ALL, padding)
self.btnSubtitlesRemove = wx.Button(self, size=(120, -1))
self.btnSubtitlesRemove.SetLabel('Remove')
self.Bind(wx.EVT_BUTTON, self.OnClickSubtitlesRemove, self.btnSubtitlesRemove)
self.btnSubtitlesRemove.Disable()
subtitlesButtons_hbox.Add(self.btnSubtitlesRemove, 0, wx.EXPAND|wx.ALL, padding)
self.btnSubtitlesProperties = wx.Button(self, size=(120, -1))
self.btnSubtitlesProperties.SetLabel('Properties')
self.Bind(wx.EVT_BUTTON, self.OnClickSubtitlesProperties, self.btnSubtitlesProperties)
self.btnSubtitlesProperties.Disable()
subtitlesButtons_hbox.Add(self.btnSubtitlesProperties, 0, wx.EXPAND|wx.ALL, padding)
#self.subtitles = wx.ListCtrl(self, -1, style=wx.LC_REPORT)
self.subtitles = SubtitlesList(self)
self.subtitles.Bind(wx.EVT_LIST_ITEM_SELECTED, self.CheckSubtitlesSelection)
self.subtitles.Bind(wx.EVT_LEFT_DCLICK, self.OnClickSubtitlesProperties)
self.subtitles.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.CheckSubtitlesSelection)
self.subtitles.Bind(wx.EVT_KILL_FOCUS, self.CheckSubtitlesSelection)
vbox.Add(self.subtitles, 0, wx.EXPAND|wx.ALL, padding)
else:
self.subtitles = None
hbox.Add(wx.StaticText(self, -1, "ffmpeg2theora doesn't seem to be built with subtitles support.\nSee documentation for how to enable subtitles.\n"))
'''
#Metadata
label = wx.StaticText(self, -1, "Metadata")
hbox.AddSpacer((12, 10))
hbox.Add(label, 0, wx.EXPAND|wx.ALL, padding)
mbox=180
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
labels = wx.BoxSizer(wx.VERTICAL)
inputs = wx.BoxSizer(wx.VERTICAL)
hbox.AddSpacer((section_padding, 10))
hbox.Add(labels, 0, wx.ALIGN_RIGHT|wx.EXPAND|wx.ALL)
hbox.Add(inputs,0, wx.ALIGN_LEFT|wx.EXPAND|wx.ALL)
#Title
label = wx.StaticText(self, -1, "Title")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.title = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.title, 0, wx.EXPAND|wx.ALL)
#Artist
label = wx.StaticText(self, -1, "Artist")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.artist = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.artist, 0, wx.EXPAND|wx.ALL)
#date
label = wx.StaticText(self, -1, "Date", size=(mbox,-1))
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.date = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.date, 0, wx.EXPAND|wx.ALL)
#Location
label = wx.StaticText(self, -1, "Location")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.location = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.location, 0, wx.EXPAND|wx.ALL)
#Organization
label = wx.StaticText(self, -1, "Organization")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.organization = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.organization, 0, wx.EXPAND|wx.ALL)
#Copyright
label = wx.StaticText(self, -1, "Copyright")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.copyright = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.copyright, 0, wx.EXPAND|wx.ALL)
#License
label = wx.StaticText(self, -1, "License")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.license = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.license, 0, wx.EXPAND|wx.ALL)
#Contact
label = wx.StaticText(self, -1, "Contact")
labels.Add(label, 0, wx.EXPAND|wx.ALL, padding)
self.contact = wx.TextCtrl(self, -1, '', size=(mbox,-1))
inputs.Add(self.contact, 0, wx.EXPAND|wx.ALL)
'''
#Buttons
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((8, 16))
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((280, 10))
self.btnCancel = wx.Button(self, wx.ID_CANCEL)
self.btnCancel.SetLabel('Cancel')
hbox.Add(self.btnCancel, 0, wx.EXPAND|wx.ALL, padding)
self.btnOK = wx.Button(self, wx.ID_OK)
self.btnOK.SetDefault()
self.btnOK.Disable()
self.btnOK.SetLabel('Add to queue')
hbox.Add(self.btnOK, 0, wx.EXPAND|wx.ALL, padding)
hbox = wx.BoxSizer(wx.HORIZONTAL)
mainBox.Add(hbox)
hbox.AddSpacer((8, 8))
self.SetSizerAndFit(mainBox)
if parent.inputFile and os.path.exists(parent.inputFile):
self.selectVideoFile(parent.inputFile)
parent.inputFile = None
def OnClickVideoFile(self, event):
#transcoding later...
wildcard = "Video files|*.OGG;*.ogg;*.OGV;*.ogv;*.AVI;*.avi;*.mov;*.MOV;*.dv;*.DV;*.mp4;*.MP4;*.m4v;*.mpg;*.mpeg;*.wmv;*.MPG;*.flv;*.FLV|All Files (*.*)|*.*"
dialogOptions = dict()
dialogOptions['message'] = 'Add Video..'
dialogOptions['wildcard'] = wildcard
dialog = wx.FileDialog(self, **dialogOptions)
if dialog.ShowModal() == wx.ID_OK:
filename = dialog.GetFilename()
dirname = dialog.GetDirectory()
self.selectVideoFile(os.path.join(dirname, filename))
else:
filename=None
dialog.Destroy()
return filename
def selectVideoFile(self, videoFile):
self.info = theoraenc.fileInfo(videoFile)
if self.info:
#FIXME: enable/disable options based on input
"""
if "video" in self.info: #source has video
#enable video options
if "audio" in self.info: #source has audio
#enable audio options
if "audio" in self.info: #source has audio
"""
self.videoFile = videoFile
lValue = videoFile
lLength = 45
if len(lValue) > lLength:
lValue = "..." + lValue[-lLength:]
self.btnVideoFile.SetLabel(lValue)
self.btnOK.Enable()
def CheckSubtitlesSelection(self, event):
idx=self.subtitles.GetFirstSelected()
if idx<0:
self.btnSubtitlesRemove.Disable()
self.btnSubtitlesProperties.Disable()
else:
self.btnSubtitlesRemove.Enable()
self.btnSubtitlesProperties.Enable()
self.subtitles.ResizeFilenameColumn()
def OnClickSubtitlesAdd(self, event):
self.subtitles.Append(['', '', '', ''])
if not self.ChangeSubtitlesProperties(self.subtitles.GetItemCount()-1):
self.subtitles.DeleteItem(self.subtitles.GetItemCount()-1)
self.subtitles.ResizeFilenameColumn()
def OnClickSubtitlesRemove(self, event):
while 1:
idx=self.subtitles.GetFirstSelected()
if idx<0:
break
self.subtitles.DeleteItem(idx)
self.CheckSubtitlesSelection(event)
def OnClickSubtitlesProperties(self, event):
idx=self.subtitles.GetFirstSelected()
if idx<0:
return
self.ChangeSubtitlesProperties(idx)
def ChangeSubtitlesProperties(self, idx):
language = self.subtitles.GetItem(idx, 0).GetText()
category = self.subtitles.GetItem(idx, 1).GetText()
encoding = self.subtitles.GetItem(idx, 2).GetText()
file = self.subtitles.GetItem(idx, 3).GetText()
result = addSubtitlesPropertiesDialog(self, language, category, encoding, file, self.hasIconv)
time.sleep(0.5) # unclear why this is needed; possibly works around a race condition
if result['ok']:
self.subtitles.SetStringItem(idx, 0, result['subtitlesLanguage'])
self.subtitles.SetStringItem(idx, 1, result['subtitlesCategory'])
self.subtitles.SetStringItem(idx, 2, result['subtitlesEncoding'])
self.subtitles.SetStringItem(idx, 3, result['subtitlesFile'])
return True
else:
return False
def addVideoDialog(parent, hasKate, hasIconv):
dlg = AddVideoDialog(parent, -1, "Add Video", hasKate, hasIconv, size=(490, 560), style=wx.DEFAULT_DIALOG_STYLE)
dlg.CenterOnScreen()
val = dlg.ShowModal()
result = dict()
if val == wx.ID_OK:
result['ok'] = True
result['videoFile'] = dlg.videoFile
for key in ('width', 'height', 'videoquality', 'videobitrate', 'framerate',
'audioquality', 'audiobitrate', 'samplerate'):
result[key] = getattr(dlg, key).GetValue()
# subtitles
if dlg.subtitles:
for idx in range(dlg.subtitles.GetItemCount()):
if not 'subtitles' in result:
result['subtitles'] = []
language = dlg.subtitles.GetItem(idx, 0).GetText()
category = dlg.subtitles.GetItem(idx, 1).GetText()
encoding = dlg.subtitles.GetItem(idx, 2).GetText()
file = dlg.subtitles.GetItem(idx, 3).GetText()
result['subtitles'].append({'encoding':encoding, 'language':language, 'category':category, 'file':file})
else:
result['ok'] = False
dlg.Destroy()
return result
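# Hedged usage sketch (editor's addition): a typical caller checks result['ok']
# before reading the remaining keys. 'start_encoding' below is a hypothetical
# consumer, not a function defined in this module.
#
#   result = addVideoDialog(parent, hasKate=True, hasIconv=True)
#   if result['ok']:
#       start_encoding(result['videoFile'],
#                      width=result['width'], height=result['height'],
#                      subtitles=result.get('subtitles', []))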
if __name__ == "__main__":
import sys
class Frame(wx.Frame):
inputFile = None
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, "add video test", size=(559,260))
self.Show(True)
app = wx.PySimpleApp()
frame=Frame()
if len(sys.argv) > 1:
frame.inputFile = sys.argv[1]
result = addVideoDialog(frame, True, True)
print result
|
gpl-2.0
|
vpalex999/python_training
|
model/address.py
|
1
|
3895
|
# -*- coding: utf-8 -*-
import re
from sys import maxsize
class Address:
def __init__(self, name=None, mname=None, lname=None, nickname=None,\
title=None, company=None, address=None, phone=None, mobile=None, workphone=None,\
fax=None, email=None, email2=None, email3=None, homepage=None,\
address2=None, phone2=None, notes=None,\
id=None, all_phones_from_home_page=None, \
all_email_from_home_page=None, \
all_address_from_home_page=None):
self.name = name
self.mname = mname
self.lname = lname
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.phone = phone
self.mobile = mobile
self.workphone = workphone
self.fax = fax
self.email = email
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.address2 = address2
self.phone2 = phone2
self.notes = notes
self.id = id
self.all_address_from_home_page = all_address_from_home_page
self.all_phones_from_home_page = all_phones_from_home_page
self.all_email_from_home_page = all_email_from_home_page
def id_or_max(self):
self.name = self.clear(self.name)
self.lname = self.clear(self.lname)
self.concat_phone()
self.concat_email()
if self.id:
return int(self.id)
else:
return maxsize
def __repr__(self):
return f"{self.name}, {self.lname}, {self.id}, {self.all_phones_from_home_page}, {self.all_email_from_home_page}"
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and\
(self.name is None or other.name is None or self.name == other.name) and\
(self.lname is None or other.lname is None or self.lname == other.lname) and\
(self.all_phones_from_home_page is None or other.all_phones_from_home_page is None or
self.all_phones_from_home_page == "" or other.all_phones_from_home_page == "" or
self.all_phones_from_home_page == other.all_phones_from_home_page) and\
(self.all_email_from_home_page is None or other.all_email_from_home_page is None or
self.all_email_from_home_page == "" or other.all_email_from_home_page == "" or
self.all_email_from_home_page == other.all_email_from_home_page)
def clear(self, s):
if s is not None:
return re.sub(" ", "", s)
def clear_p(self, s):
return re.sub("[() -]", "", s)
def clear_e(self, s):
return re.sub("[() ]", "", s)
def merge_phones_like_on_home_page(self):
return "\n".join(filter(lambda x: x != "",
map(lambda x: self.clear_p(x),
filter(lambda x: x is not None, [self.phone, self.mobile, self.workphone,
self.phone2]))))
def merge_email_like_on_home_page(self):
return "\n".join(filter(lambda x: x != "",
map(lambda x: self.clear_e(x),
filter(lambda x: x is not None, [self.email, self.email2, self.email3]))))
def concat_phone(self):
if self.all_phones_from_home_page is None or not len(self.all_phones_from_home_page):
self.all_phones_from_home_page = self.merge_phones_like_on_home_page()
def concat_email(self):
if self.all_email_from_home_page is None or not len(self.all_email_from_home_page):
self.all_email_from_home_page = self.merge_email_like_on_home_page()
self.all_email_from_home_page = self.clear(self.all_email_from_home_page)
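if __name__ == "__main__":
    # Hedged demo (editor's addition): shows how the merge helpers collapse the
    # separate phone/email fields into the "all_*_from_home_page" strings that
    # the tests compare against. The sample values are invented for illustration.
    sample = Address(name="Anna", lname="Lee",
                     phone="(095) 123-45-67", mobile="+7 912 000 00 00",
                     email="anna @example.com", email2="a.lee@example.org")
    sample.concat_phone()
    sample.concat_email()
    print(sample.all_phones_from_home_page)  # 0951234567 / +79120000000, newline-separated
    print(sample.all_email_from_home_page)   # anna@example.com / a.lee@example.org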
|
apache-2.0
|
b3c/VTK-5.8
|
Common/Testing/Python/TestSubClass.py
|
18
|
3350
|
"""Test subclassing support in VTK-Python
VTK classes can be subclassed in Python. There are
some caveats, such as:
- protected items are inaccessible to the python class
- virtual method calls from C++ are not propagated to python
To be tested:
- make sure that subclassing works
- make sure that unbound superclass methods can be called
Created on Sept 26, 2010 by David Gobbi
"""
import sys
import exceptions
import vtk
from vtk.test import Testing
class vtkCustomObject(vtk.vtkObject):
def __init__(self):
"""Initialize all attributes."""
self._ExtraObject = vtk.vtkObject()
def GetClassName(self):
"""Get the class name."""
return self.__class__.__name__
def GetExtraObject(self):
"""Getter method."""
return self._ExtraObject
def SetExtraObject(self, o):
"""Setter method."""
# make sure it is "None" or a vtkobject instance
if o == None or type(o) == type(self):
self._ExtraObject = o
self.Modified()
else:
raise TypeError, "requires None or a vtkobject"
def GetMTime(self):
"""Override a method (only works when called from Python)"""
t = vtk.vtkObject.GetMTime(self)
if self._ExtraObject:
t = max(t, self._ExtraObject.GetMTime())
return t
class TestSubclass(Testing.vtkTest):
def testSubclassInstantiate(self):
"""Instantiate a python vtkObject subclass"""
o = vtkCustomObject()
self.assertEqual(o.GetClassName(), "vtkCustomObject")
def testCallUnboundMethods(self):
"""Test calling an unbound method in an overridded method"""
o = vtkCustomObject()
a = vtk.vtkIntArray()
o.SetExtraObject(a)
a.Modified()
# GetMTime should return a's mtime
self.assertEqual(o.GetMTime(), a.GetMTime())
# calling the vtkObject mtime should give a lower MTime
self.assertNotEqual(o.GetMTime(), vtk.vtkObject.GetMTime(o))
# a couple more quick unbound method checks
vtk.vtkDataArray.InsertNextTuple1(a, 2)
self.assertEqual(a.GetTuple1(0), 2)
def testPythonRTTI(self):
"""Test the python isinstance and issubclass methods """
o = vtkCustomObject()
d = vtk.vtkIntArray()
self.assertEqual(True, isinstance(o, vtk.vtkObjectBase))
self.assertEqual(True, isinstance(d, vtk.vtkObjectBase))
self.assertEqual(True, isinstance(o, vtkCustomObject))
self.assertEqual(False, isinstance(d, vtkCustomObject))
self.assertEqual(False, isinstance(o, vtk.vtkDataArray))
self.assertEqual(True, issubclass(vtkCustomObject, vtk.vtkObject))
self.assertEqual(False, issubclass(vtk.vtkObject, vtkCustomObject))
self.assertEqual(False, issubclass(vtkCustomObject, vtk.vtkDataArray))
def testSubclassGhost(self):
"""Make sure ghosting of the class works"""
o = vtkCustomObject()
c = vtk.vtkCollection()
c.AddItem(o)
i = id(o)
del o
o = vtk.vtkObject()
o = c.GetItemAsObject(0)
# make sure the id has changed, but the class is the same
self.assertEqual(o.__class__, vtkCustomObject)
self.assertNotEqual(i, id(o))
if __name__ == "__main__":
Testing.main([(TestSubclass, 'test')])
|
bsd-3-clause
|
henriquebastos/django-decouple
|
decouple.py
|
1
|
4877
|
# coding: utf-8
import os
import sys
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
from configparser import ConfigParser
else:
from ConfigParser import SafeConfigParser as ConfigParser
class UndefinedValueError(Exception):
pass
class Undefined(object):
"""
Class to represent undefined type.
"""
pass
# Reference instance to represent undefined values
undefined = Undefined()
class Config(object):
"""
Handle .env file format used by Foreman.
"""
_BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, repository):
self.repository = repository
def _cast_boolean(self, value):
"""
Helper to convert config values to boolean as ConfigParser do.
"""
if value.lower() not in self._BOOLEANS:
raise ValueError('Not a boolean: %s' % value)
return self._BOOLEANS[value.lower()]
def get(self, option, default=undefined, cast=undefined):
"""
Return the value for option or default if defined.
"""
if option in self.repository:
value = self.repository.get(option)
else:
value = default
if isinstance(value, Undefined):
raise UndefinedValueError('%s option not found and default value was not defined.' % option)
if isinstance(cast, Undefined):
cast = lambda v: v # nop
elif cast is bool:
cast = self._cast_boolean
return cast(value)
def __call__(self, *args, **kwargs):
"""
Convenient shortcut to get.
"""
return self.get(*args, **kwargs)
class RepositoryBase(object):
def __init__(self, source):
raise NotImplementedError
def __contains__(self, key):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
class RepositoryIni(RepositoryBase):
"""
Retrieves option keys from .ini files.
"""
SECTION = 'settings'
def __init__(self, source):
self.parser = ConfigParser()
self.parser.readfp(open(source))
def __contains__(self, key):
return self.parser.has_option(self.SECTION, key)
def get(self, key):
return self.parser.get(self.SECTION, key)
class RepositoryEnv(RepositoryBase):
"""
Retrieves option keys from .env files with fall back to os.environ.
"""
def __init__(self, source):
self.data = {}
for line in open(source):
line = line.strip()
if not line or line.startswith('#') or '=' not in line:
continue
k, v = line.split('=', 1)
v = v.strip("'").strip('"')
self.data[k] = v
def __contains__(self, key):
return key in self.data or key in os.environ
def get(self, key):
return self.data.get(key) or os.environ[key]
class RepositoryShell(RepositoryBase):
"""
Retrieves option keys from os.environ.
"""
def __init__(self, source=None):
pass
def __contains__(self, key):
return key in os.environ
def get(self, key):
return os.environ[key]
class AutoConfig(object):
"""
Autodetects the config file and type.
"""
SUPPORTED = {
'settings.ini': RepositoryIni,
'.env': RepositoryEnv,
}
def __init__(self):
self.config = None
def _find_file(self, path):
# look for all files in the current path
for configfile in self.SUPPORTED:
filename = os.path.join(path, configfile)
if os.path.exists(filename):
return filename
# search the parent
parent = os.path.dirname(path)
if parent and parent != os.path.sep:
return self._find_file(parent)
# reached root without finding any files.
return ''
def _load(self, path):
# Avoid unintended permission errors
try:
filename = self._find_file(path)
except Exception:
filename = ''
Repository = self.SUPPORTED.get(os.path.basename(filename))
if not Repository:
Repository = RepositoryShell
self.config = Config(Repository(filename))
def _caller_path(self):
# MAGIC! Get the caller's module path.
frame = sys._getframe()
path = os.path.dirname(frame.f_back.f_back.f_code.co_filename)
return path
def __call__(self, *args, **kwargs):
if not self.config:
self._load(self._caller_path())
return self.config(*args, **kwargs)
# A pre-instantiated AutoConfig to improve decouple's usability:
# just import config and start using it, no further configuration needed.
config = AutoConfig()
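# Hedged usage sketch (editor's addition): typical use in a settings module.
# The option names are invented for illustration; values come from a
# settings.ini or .env file discovered by AutoConfig above, or from os.environ
# when no file is found. A string default keeps the boolean cast working with
# this version of _cast_boolean.
#
#   from decouple import config
#   SECRET_KEY = config('SECRET_KEY')                        # required, no default
#   DEBUG = config('DEBUG', default='false', cast=bool)      # '1'/'true'/'on' -> True
#   TIMEOUT = config('TIMEOUT', default=30, cast=int)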
|
mit
|
hyperNURb/ggrc-core
|
src/ggrc_workflows/migrations/versions/20150403101250_4f9f00e4faca_fill_object_types_table_with_workflow_.py
|
4
|
1454
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""fill object types table with workflow models
Revision ID: 4f9f00e4faca
Revises: 57cc398ad417
Create Date: 2015-04-03 10:12:50.583661
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '4f9f00e4faca'
down_revision = '8e530ce276a'
def upgrade():
object_types_table = table(
'object_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
op.bulk_insert(
object_types_table,
[
{"name": "Workflow", "description": ""},
{"name": "TaskGroup", "description": ""},
{"name": "TaskGroupTask", "description": ""},
{"name": "TaskGroupObject", "description": ""},
{"name": "Cycle", "description": ""},
{"name": "CycleTaskGroup", "description": ""},
{"name": "CycleTaskGroupObject", "description": ""},
{"name": "CycleTaskGroupObjectTask", "description": ""},
]
)
def downgrade():
pass
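# Hedged usage note (editor's addition): with Alembic configured for this
# package, the bulk insert above is applied with `alembic upgrade 4f9f00e4faca`
# (or simply `alembic upgrade head`); downgrade() is intentionally a no-op, so
# stepping back does not remove the inserted object_types rows.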
|
apache-2.0
|
mitchcapper/mythbox
|
resources/lib/twisted/twisted/persisted/crefutil.py
|
61
|
4644
|
# -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utility classes for dealing with circular references.
"""
from twisted.python import log, reflect
try:
from new import instancemethod
except:
from org.python.core import PyMethod
instancemethod = PyMethod
class NotKnown:
def __init__(self):
self.dependants = []
self.resolved = 0
def addDependant(self, mutableObject, key):
assert not self.resolved
self.dependants.append( (mutableObject, key) )
resolvedObject = None
def resolveDependants(self, newObject):
self.resolved = 1
self.resolvedObject = newObject
for mut, key in self.dependants:
mut[key] = newObject
if isinstance(newObject, NotKnown):
newObject.addDependant(mut, key)
def __hash__(self):
assert 0, "I am not to be used as a dictionary key."
class _Container(NotKnown):
"""
Helper class to resolve circular references on container objects.
"""
def __init__(self, l, containerType):
"""
@param l: The list of object which may contain some not yet referenced
objects.
@param containerType: A type of container objects (e.g., C{tuple} or
C{set}).
"""
NotKnown.__init__(self)
self.containerType = containerType
self.l = l
self.locs = range(len(l))
for idx in xrange(len(l)):
if not isinstance(l[idx], NotKnown):
self.locs.remove(idx)
else:
l[idx].addDependant(self, idx)
if not self.locs:
self.resolveDependants(self.containerType(self.l))
def __setitem__(self, n, obj):
"""
Change the value of one contained objects, and resolve references if
all objects have been referenced.
"""
self.l[n] = obj
if not isinstance(obj, NotKnown):
self.locs.remove(n)
if not self.locs:
self.resolveDependants(self.containerType(self.l))
class _Tuple(_Container):
"""
Manage tuple containing circular references. Deprecated: use C{_Container}
instead.
"""
def __init__(self, l):
"""
@param l: The list of object which may contain some not yet referenced
objects.
"""
_Container.__init__(self, l, tuple)
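# Hedged illustration (editor's addition): how _Container/_Tuple defer building
# the real tuple until every placeholder slot is resolved. The values are
# invented; _Dereference is defined further below in this module.
#
#   d = _Dereference(42)          # stands in for a not-yet-unpickled object
#   t = _Tuple([1, d])            # index 1 is unknown, so no tuple is built yet
#   d.resolveDependants("spam")   # fills slot 1; t now resolves itself
#   assert t.resolvedObject == (1, "spam")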
class _InstanceMethod(NotKnown):
def __init__(self, im_name, im_self, im_class):
NotKnown.__init__(self)
self.my_class = im_class
self.name = im_name
# im_self _must_ be a
im_self.addDependant(self, 0)
def __call__(self, *args, **kw):
import traceback
log.msg('instance method %s.%s' % (reflect.qual(self.my_class), self.name))
log.msg('being called with %r %r' % (args, kw))
traceback.print_stack(file=log.logfile)
assert 0
def __setitem__(self, n, obj):
assert n == 0, "only zero index allowed"
if not isinstance(obj, NotKnown):
self.resolveDependants(instancemethod(self.my_class.__dict__[self.name],
obj,
self.my_class))
class _DictKeyAndValue:
def __init__(self, dict):
self.dict = dict
def __setitem__(self, n, obj):
if n not in (1, 0):
raise RuntimeError("DictKeyAndValue should only ever be called with 0 or 1")
if n: # value
self.value = obj
else:
self.key = obj
if hasattr(self, "key") and hasattr(self, "value"):
self.dict[self.key] = self.value
class _Dereference(NotKnown):
def __init__(self, id):
NotKnown.__init__(self)
self.id = id
from twisted.internet.defer import Deferred
class _Catcher:
def catch(self, value):
self.value = value
class _Defer(Deferred, NotKnown):
def __init__(self):
Deferred.__init__(self)
NotKnown.__init__(self)
self.pause()
wasset = 0
def __setitem__(self, n, obj):
if self.wasset:
raise RuntimeError('setitem should only be called once, setting %r to %r' % (n, obj))
else:
self.wasset = 1
self.callback(obj)
def addDependant(self, dep, key):
# by the time I'm adding a dependant, I'm *not* adding any more
# callbacks
NotKnown.addDependant(self, dep, key)
self.unpause()
resolved = self.result
self.resolveDependants(resolved)
|
gpl-2.0
|
ApuliaSoftware/odoo
|
addons/crm_claim/report/crm_claim_report.py
|
180
|
4664
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
AVAILABLE_PRIORITIES = [
('0', 'Low'),
('1', 'Normal'),
('2', 'High')
]
class crm_claim_report(osv.osv):
""" CRM Claim Report"""
_name = "crm.claim.report"
_auto = False
_description = "CRM Claim Report"
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'nbr': fields.integer('# of Claims', readonly=True), # TDE FIXME master: rename into nbr_claims
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'claim_date': fields.datetime('Claim Date', readonly=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'stage_id': fields.many2one ('crm.claim.stage', 'Stage', readonly=True,domain="[('section_ids','=',section_id)]"),
'categ_id': fields.many2one('crm.case.categ', 'Category',\
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.claim')]", readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'priority': fields.selection(AVAILABLE_PRIORITIES, 'Priority'),
'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
'date_closed': fields.datetime('Close Date', readonly=True, select=True),
'date_deadline': fields.date('Deadline', readonly=True, select=True),
'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
'email': fields.integer('# Emails', size=128, readonly=True),
'subject': fields.char('Claim Subject', readonly=True)
}
def init(self, cr):
""" Display Number of cases And Section Name
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_claim_report')
cr.execute("""
create or replace view crm_claim_report as (
select
min(c.id) as id,
c.date as claim_date,
c.date_closed as date_closed,
c.date_deadline as date_deadline,
c.user_id,
c.stage_id,
c.section_id,
c.partner_id,
c.company_id,
c.categ_id,
c.name as subject,
count(*) as nbr,
c.priority as priority,
c.type_action as type_action,
c.create_date as create_date,
avg(extract('epoch' from (c.date_closed-c.create_date)))/(3600*24) as delay_close,
(SELECT count(id) FROM mail_message WHERE model='crm.claim' AND res_id=c.id) AS email,
extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as delay_expected
from
crm_claim c
group by c.date,\
c.user_id,c.section_id, c.stage_id,\
c.categ_id,c.partner_id,c.company_id,c.create_date,
c.priority,c.type_action,c.date_deadline,c.date_closed,c.id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
TNT-Samuel/Coding-Projects
|
DNS Server/Source - Copy/Lib/site-packages/dask/array/rechunk.py
|
2
|
21831
|
"""
The rechunk module defines:
intersect_chunks: a function for
converting chunks to new dimensions
rechunk: a function to convert the blocks
of an existing dask array to new chunks or blockshape
"""
from __future__ import absolute_import, division, print_function
import math
import heapq
from itertools import product, chain, count
from operator import getitem, add, mul, itemgetter
import numpy as np
import toolz
from toolz import accumulate, reduce
from ..base import tokenize
from ..utils import parse_bytes
from .core import concatenate3, Array, normalize_chunks
from .wrap import empty
from .. import config, sharedict
def cumdims_label(chunks, const):
""" Internal utility for cumulative sum with label.
>>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n') # doctest: +NORMALIZE_WHITESPACE
[(('n', 0), ('n', 5), ('n', 8), ('n', 11)),
(('n', 0), ('n', 2), ('n', 4), ('n', 5))]
"""
return [tuple(zip((const,) * (1 + len(bds)),
accumulate(add, (0,) + bds)))
for bds in chunks]
def _breakpoints(cumold, cumnew):
"""
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _breakpoints(new[0], old[0])
(('n', 0), ('o', 0), ('n', 2), ('o', 2), ('o', 4), ('n', 5), ('o', 5))
>>> _breakpoints(new[1], old[1])
(('n', 0), ('o', 0), ('n', 2), ('n', 4), ('n', 5), ('o', 5))
"""
return tuple(sorted(cumold + cumnew, key=itemgetter(1)))
def _intersect_1d(breaks):
"""
Internal utility to intersect chunks for 1d after preprocessing.
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _intersect_1d(_breakpoints(old[0], new[0])) # doctest: +NORMALIZE_WHITESPACE
[[(0, slice(0, 2, None))],
[(1, slice(0, 2, None)), (2, slice(0, 1, None))]]
>>> _intersect_1d(_breakpoints(old[1], new[1])) # doctest: +NORMALIZE_WHITESPACE
[[(0, slice(0, 2, None))],
[(0, slice(2, 4, None))],
[(0, slice(4, 5, None))]]
Parameters
----------
breaks: list of tuples
Each tuple is ('o', 8) or ('n', 8)
These are pairs of 'o' old or new 'n'
indicator with a corresponding cumulative sum.
Uses 'o' and 'n' to make new tuples of slices for
the new block crosswalk to old blocks.
"""
start = 0
last_end = 0
old_idx = 0
ret = []
ret_next = []
for idx in range(1, len(breaks)):
label, br = breaks[idx]
last_label, last_br = breaks[idx - 1]
if last_label == 'n':
if ret_next:
ret.append(ret_next)
ret_next = []
if last_label == 'o':
start = 0
else:
start = last_end
end = br - last_br + start
last_end = end
if br == last_br:
continue
ret_next.append((old_idx, slice(start, end)))
if label == 'o':
old_idx += 1
start = 0
if ret_next:
ret.append(ret_next)
return ret
def _old_to_new(old_chunks, new_chunks):
""" Helper to build old_chunks to new_chunks.
Handles missing values, as long as the missing dimension
is unchanged.
Examples
--------
>>> old = ((10, 10, 10, 10, 10), )
>>> new = ((25, 5, 20), )
>>> _old_to_new(old, new) # doctest: +NORMALIZE_WHITESPACE
[[[(0, slice(0, 10, None)), (1, slice(0, 10, None)), (2, slice(0, 5, None))],
[(2, slice(5, 10, None))],
[(3, slice(0, 10, None)), (4, slice(0, 10, None))]]]
"""
old_known = [x for x in old_chunks if not any(math.isnan(y) for y in x)]
new_known = [x for x in new_chunks if not any(math.isnan(y) for y in x)]
n_missing = [sum(math.isnan(y) for y in x) for x in old_chunks]
n_missing2 = [sum(math.isnan(y) for y in x) for x in new_chunks]
cmo = cumdims_label(old_known, 'o')
cmn = cumdims_label(new_known, 'n')
sums = [sum(o) for o in old_known]
sums2 = [sum(n) for n in new_known]
if not sums == sums2:
raise ValueError('Cannot change dimensions from %r to %r' % (sums, sums2))
if not n_missing == n_missing2:
raise ValueError('Chunks must be unchanging along unknown dimensions')
old_to_new = [_intersect_1d(_breakpoints(cm[0], cm[1])) for cm in zip(cmo, cmn)]
for idx, missing in enumerate(n_missing):
if missing:
# Missing dimensions are always unchanged, so old -> new is everything
extra = [[(i, slice(0, None))] for i in range(missing)]
old_to_new.insert(idx, extra)
return old_to_new
def intersect_chunks(old_chunks, new_chunks):
"""
Make dask.array slices as intersection of old and new chunks.
>>> intersections = intersect_chunks(((4, 4), (2,)),
... ((8,), (1, 1)))
>>> list(intersections) # doctest: +NORMALIZE_WHITESPACE
[(((0, slice(0, 4, None)), (0, slice(0, 1, None))),
((1, slice(0, 4, None)), (0, slice(0, 1, None)))),
(((0, slice(0, 4, None)), (0, slice(1, 2, None))),
((1, slice(0, 4, None)), (0, slice(1, 2, None))))]
Parameters
----------
old_chunks : iterable of tuples
block sizes along each dimension (convert from old_chunks)
new_chunks: iterable of tuples
block sizes along each dimension (converts to new_chunks)
"""
old_to_new = _old_to_new(old_chunks, new_chunks)
cross1 = product(*old_to_new)
cross = chain(tuple(product(*cr)) for cr in cross1)
return cross
def rechunk(x, chunks, threshold=None,
block_size_limit=None):
"""
Convert blocks in dask array x for new chunks.
Parameters
----------
x: dask array
Array to be rechunked.
chunks: int, tuple or dict
The new block dimensions to create. -1 indicates the full size of the
corresponding dimension.
threshold: int
The graph growth factor under which we don't bother introducing an
intermediate step.
block_size_limit: int
The maximum block size (in bytes) we want to produce
Defaults to the configuration value ``array.chunk-size``
Examples
--------
>>> import dask.array as da
>>> x = da.ones((1000, 1000), chunks=(100, 100))
Specify uniform chunk sizes with a tuple
>>> y = x.rechunk((1000, 10))
Or chunk only specific dimensions with a dictionary
>>> y = x.rechunk({0: 1000})
Use the value ``-1`` to specify that you want a single chunk along a
dimension or the value ``"auto"`` to specify that dask can freely rechunk a
dimension to attain blocks of a uniform block size
>>> y = x.rechunk({0: -1, 1: 'auto'}, block_size_limit=1e8)
"""
if isinstance(chunks, dict):
chunks = dict(chunks)
for i in range(x.ndim):
if i not in chunks:
chunks[i] = x.chunks[i]
if isinstance(chunks, (tuple, list)):
chunks = tuple(lc if lc is not None else rc
for lc, rc in zip(chunks, x.chunks))
chunks = normalize_chunks(chunks, x.shape, limit=block_size_limit,
dtype=x.dtype, previous_chunks=x.chunks)
if chunks == x.chunks:
return x
ndim = x.ndim
if not len(chunks) == ndim:
raise ValueError("Provided chunks are not consistent with shape")
new_shapes = tuple(map(sum, chunks))
for new, old in zip(new_shapes, x.shape):
if new != old and not math.isnan(old) and not math.isnan(new):
raise ValueError("Provided chunks are not consistent with shape")
steps = plan_rechunk(x.chunks, chunks, x.dtype.itemsize,
threshold, block_size_limit)
for c in steps:
x = _compute_rechunk(x, c)
return x
def _number_of_blocks(chunks):
return reduce(mul, map(len, chunks))
def _largest_block_size(chunks):
return reduce(mul, map(max, chunks))
def estimate_graph_size(old_chunks, new_chunks):
""" Estimate the graph size during a rechunk computation.
"""
# Estimate the number of intermediate blocks that will be produced
# (we don't use intersect_chunks() which is much more expensive)
crossed_size = reduce(mul, (len(oc) + len(nc)
for oc, nc in zip(old_chunks, new_chunks)))
return crossed_size
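# Hedged example (editor's addition): for old_chunks=((4, 4), (2,)) and
# new_chunks=((8,), (1, 1)) the estimate is (2 + 1) * (1 + 2) == 9, i.e. the
# product over dimensions of len(old blocks) + len(new blocks).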
def divide_to_width(desired_chunks, max_width):
""" Minimally divide the given chunks so as to make the largest chunk
width less or equal than *max_width*.
"""
chunks = []
for c in desired_chunks:
nb_divides = int(np.ceil(c / max_width))
for i in range(nb_divides):
n = c // (nb_divides - i)
chunks.append(n)
c -= n
assert c == 0
return tuple(chunks)
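# Hedged example (editor's addition):
#   divide_to_width((10, 12), 5) == (5, 5, 4, 4, 4)
# 10 splits into two width-5 pieces and 12 into three width-4 pieces, so no
# piece exceeds max_width=5 while adding as few extra chunks as possible.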
def merge_to_number(desired_chunks, max_number):
""" Minimally merge the given chunks so as to drop the number of
chunks below *max_number*, while minimizing the largest width.
"""
if len(desired_chunks) <= max_number:
return desired_chunks
distinct = set(desired_chunks)
if len(distinct) == 1:
# Fast path for homogeneous target, also ensuring a regular result
w = distinct.pop()
n = len(desired_chunks)
total = n * w
desired_width = total // max_number
width = w * (desired_width // w)
adjust = (total - max_number * width) // w
return (width + w,) * adjust + (width,) * (max_number - adjust)
desired_width = sum(desired_chunks) // max_number
nmerges = len(desired_chunks) - max_number
heap = [(desired_chunks[i] + desired_chunks[i + 1], i, i + 1)
for i in range(len(desired_chunks) - 1)]
heapq.heapify(heap)
chunks = list(desired_chunks)
while nmerges > 0:
# Find smallest interval to merge
width, i, j = heapq.heappop(heap)
# If interval was made invalid by another merge, recompute
# it, re-insert it and retry.
if chunks[j] == 0:
j += 1
while chunks[j] == 0:
j += 1
heapq.heappush(heap, (chunks[i] + chunks[j], i, j))
continue
elif chunks[i] + chunks[j] != width:
heapq.heappush(heap, (chunks[i] + chunks[j], i, j))
continue
# Merge
assert chunks[i] != 0
chunks[i] = 0 # mark deleted
chunks[j] = width
nmerges -= 1
return tuple(filter(None, chunks))
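# Hedged example (editor's addition): the homogeneous fast path,
#   merge_to_number((2, 2, 2, 2, 2, 2), 3) == (4, 4, 4)
# six width-2 chunks are merged pairwise into three width-4 chunks; the general
# (heterogeneous) path instead repeatedly merges the smallest adjacent pair.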
def find_merge_rechunk(old_chunks, new_chunks, block_size_limit):
"""
Find an intermediate rechunk that would merge some adjacent blocks
together in order to get us nearer the *new_chunks* target, without
violating the *block_size_limit* (in number of elements).
"""
ndim = len(old_chunks)
old_largest_width = [max(c) for c in old_chunks]
new_largest_width = [max(c) for c in new_chunks]
graph_size_effect = {
dim: len(nc) / len(oc)
for dim, (oc, nc) in enumerate(zip(old_chunks, new_chunks))
}
block_size_effect = {
dim: new_largest_width[dim] / (old_largest_width[dim] or 1)
for dim in range(ndim)
}
# Our goal is to reduce the number of nodes in the rechunk graph
# by merging some adjacent chunks, so consider dimensions where we can
# reduce the # of chunks
merge_candidates = [dim for dim in range(ndim)
if graph_size_effect[dim] <= 1.0]
# Merging along each dimension reduces the graph size by a certain factor
# and increases memory largest block size by a certain factor.
# We want to optimize the graph size while staying below the given
# block_size_limit. This is in effect a knapsack problem, except with
# multiplicative values and weights. Just use a greedy algorithm
# by trying dimensions in decreasing value / weight order.
def key(k):
gse = graph_size_effect[k]
bse = block_size_effect[k]
if bse == 1:
bse = 1 + 1e-9
return (np.log(gse) / np.log(bse)) if bse > 0 else 0
sorted_candidates = sorted(merge_candidates, key=key)
largest_block_size = reduce(mul, old_largest_width)
chunks = list(old_chunks)
memory_limit_hit = False
for dim in sorted_candidates:
# Examine this dimension for possible graph reduction
new_largest_block_size = (
largest_block_size * new_largest_width[dim] // (old_largest_width[dim] or 1))
if new_largest_block_size <= block_size_limit:
# Full replacement by new chunks is possible
chunks[dim] = new_chunks[dim]
largest_block_size = new_largest_block_size
else:
# Try a partial rechunk, dividing the new chunks into
# smaller pieces
largest_width = old_largest_width[dim]
chunk_limit = int(block_size_limit * largest_width / largest_block_size)
c = divide_to_width(new_chunks[dim], chunk_limit)
if len(c) <= len(old_chunks[dim]):
# We manage to reduce the number of blocks, so do it
chunks[dim] = c
largest_block_size = largest_block_size * max(c) // largest_width
memory_limit_hit = True
assert largest_block_size == _largest_block_size(chunks)
assert largest_block_size <= block_size_limit
return tuple(chunks), memory_limit_hit
def find_split_rechunk(old_chunks, new_chunks, graph_size_limit):
"""
Find an intermediate rechunk that would split some chunks to
get us nearer *new_chunks*, without violating the *graph_size_limit*.
"""
ndim = len(old_chunks)
chunks = list(old_chunks)
for dim in range(ndim):
graph_size = estimate_graph_size(chunks, new_chunks)
if graph_size > graph_size_limit:
break
if len(old_chunks[dim]) > len(new_chunks[dim]):
# It's not interesting to split
continue
# Merge the new chunks so as to stay within the graph size budget
max_number = int(len(old_chunks[dim]) * graph_size_limit / graph_size)
c = merge_to_number(new_chunks[dim], max_number)
assert len(c) <= max_number
# Consider the merge successful if its result has a greater length
# and smaller max width than the old chunks
if len(c) >= len(old_chunks[dim]) and max(c) <= max(old_chunks[dim]):
chunks[dim] = c
return tuple(chunks)
def plan_rechunk(old_chunks, new_chunks, itemsize,
threshold=None,
block_size_limit=None):
""" Plan an iterative rechunking from *old_chunks* to *new_chunks*.
The plan aims to minimize the rechunk graph size.
Parameters
----------
itemsize: int
The item size of the array
threshold: int
The graph growth factor under which we don't bother
introducing an intermediate step
block_size_limit: int
The maximum block size (in bytes) we want to produce during an
intermediate step
Notes
-----
No intermediate steps will be planned if any dimension of ``old_chunks``
is unknown.
"""
threshold = threshold or config.get('array.rechunk-threshold')
block_size_limit = block_size_limit or config.get('array.chunk-size')
if isinstance(block_size_limit, str):
block_size_limit = parse_bytes(block_size_limit)
ndim = len(new_chunks)
steps = []
has_nans = [any(math.isnan(y) for y in x) for x in old_chunks]
if ndim <= 1 or not all(new_chunks) or any(has_nans):
# Trivial array / unknown dim => no need / ability for an intermediate
return steps + [new_chunks]
# Make it a number of elements
block_size_limit /= itemsize
# Fix block_size_limit if too small for either old_chunks or new_chunks
largest_old_block = _largest_block_size(old_chunks)
largest_new_block = _largest_block_size(new_chunks)
block_size_limit = max([block_size_limit,
largest_old_block,
largest_new_block,
])
# The graph size above which to optimize
graph_size_threshold = threshold * (_number_of_blocks(old_chunks) +
_number_of_blocks(new_chunks))
current_chunks = old_chunks
first_pass = True
while True:
graph_size = estimate_graph_size(current_chunks, new_chunks)
if graph_size < graph_size_threshold:
break
if first_pass:
chunks = current_chunks
else:
# We hit the block_size_limit in a previous merge pass =>
# accept a significant increase in graph size in exchange for
# 1) getting nearer the goal 2) reducing the largest block size
# to make place for the following merge.
# To see this pass in action, make the block_size_limit very small.
chunks = find_split_rechunk(current_chunks, new_chunks,
graph_size * threshold)
chunks, memory_limit_hit = find_merge_rechunk(chunks, new_chunks,
block_size_limit)
if (chunks == current_chunks and not first_pass) or chunks == new_chunks:
break
steps.append(chunks)
current_chunks = chunks
if not memory_limit_hit:
break
first_pass = False
return steps + [new_chunks]
def _compute_rechunk(x, chunks):
""" Compute the rechunk of *x* to the given *chunks*.
"""
if x.size == 0:
# Special case for empty array, as the algorithm below does not behave correctly
return empty(x.shape, chunks=chunks, dtype=x.dtype)
ndim = x.ndim
crossed = intersect_chunks(x.chunks, chunks)
x2 = dict()
intermediates = dict()
token = tokenize(x, chunks)
merge_temp_name = 'rechunk-merge-' + token
split_temp_name = 'rechunk-split-' + token
split_name_suffixes = count()
# Pre-allocate old block references, to allow re-use and reduce the
# graph's memory footprint a bit.
old_blocks = np.empty([len(c) for c in x.chunks], dtype='O')
for index in np.ndindex(old_blocks.shape):
old_blocks[index] = (x.name,) + index
# Iterate over all new blocks
new_index = product(*(range(len(c)) for c in chunks))
for new_idx, cross1 in zip(new_index, crossed):
key = (merge_temp_name,) + new_idx
old_block_indices = [[cr[i][0] for cr in cross1] for i in range(ndim)]
subdims1 = [len(set(old_block_indices[i]))
for i in range(ndim)]
rec_cat_arg = np.empty(subdims1, dtype='O')
rec_cat_arg_flat = rec_cat_arg.flat
# Iterate over the old blocks required to build the new block
for rec_cat_index, ind_slices in enumerate(cross1):
old_block_index, slices = zip(*ind_slices)
name = (split_temp_name, next(split_name_suffixes))
old_index = old_blocks[old_block_index][1:]
if all(slc.start == 0 and slc.stop == x.chunks[i][ind]
for i, (slc, ind) in enumerate(zip(slices, old_index))):
rec_cat_arg_flat[rec_cat_index] = old_blocks[old_block_index]
else:
intermediates[name] = (getitem, old_blocks[old_block_index], slices)
rec_cat_arg_flat[rec_cat_index] = name
assert rec_cat_index == rec_cat_arg.size - 1
# New block is formed by concatenation of sliced old blocks
if all(d == 1 for d in rec_cat_arg.shape):
x2[key] = rec_cat_arg.flat[0]
else:
x2[key] = (concatenate3, rec_cat_arg.tolist())
del old_blocks, new_index
x2 = sharedict.merge(x.dask, (merge_temp_name, toolz.merge(x2, intermediates)))
return Array(x2, merge_temp_name, chunks, dtype=x.dtype)
class _PrettyBlocks(object):
def __init__(self, blocks):
self.blocks = blocks
def __str__(self):
runs = []
run = []
repeats = 0
for c in self.blocks:
if run and run[-1] == c:
if repeats == 0 and len(run) > 1:
runs.append((None, run[:-1]))
run = run[-1:]
repeats += 1
else:
if repeats > 0:
assert len(run) == 1
runs.append((repeats + 1, run[-1]))
run = []
repeats = 0
run.append(c)
if run:
if repeats == 0:
runs.append((None, run))
else:
assert len(run) == 1
runs.append((repeats + 1, run[-1]))
parts = []
for repeats, run in runs:
if repeats is None:
parts.append(str(run))
else:
parts.append("%d*[%s]" % (repeats, run))
return " | ".join(parts)
__repr__ = __str__
def format_blocks(blocks):
"""
Pretty-format *blocks*.
>>> format_blocks((10, 10, 10))
3*[10]
>>> format_blocks((2, 3, 4))
[2, 3, 4]
>>> format_blocks((10, 10, 5, 6, 2, 2, 2, 7))
2*[10] | [5, 6] | 3*[2] | [7]
"""
assert (isinstance(blocks, tuple) and
all(isinstance(x, int) or math.isnan(x)
for x in blocks))
return _PrettyBlocks(blocks)
def format_chunks(chunks):
"""
>>> format_chunks((10 * (3,), 3 * (10,)))
(10*[3], 3*[10])
"""
assert isinstance(chunks, tuple)
return tuple(format_blocks(c) for c in chunks)
def format_plan(plan):
"""
>>> format_plan([((10, 10, 10), (15, 15)), ((30,), (10, 10, 10))])
[(3*[10], 2*[15]), ([30], 3*[10])]
"""
return [format_chunks(c) for c in plan]
|
gpl-3.0
|
googleapis/googleapis-gen
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/errors/types/third_party_app_analytics_link_error.py
|
1
|
1401
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.errors',
marshal='google.ads.googleads.v7',
manifest={
'ThirdPartyAppAnalyticsLinkErrorEnum',
},
)
class ThirdPartyAppAnalyticsLinkErrorEnum(proto.Message):
r"""Container for enum describing possible third party app
analytics link errors.
"""
class ThirdPartyAppAnalyticsLinkError(proto.Enum):
r"""Enum describing possible third party app analytics link
errors.
"""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_ANALYTICS_PROVIDER_ID = 2
INVALID_MOBILE_APP_ID = 3
MOBILE_APP_IS_NOT_ENABLED = 4
CANNOT_REGENERATE_SHAREABLE_LINK_ID_FOR_REMOVED_LINK = 5
__all__ = tuple(sorted(__protobuf__.manifest))
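# Hedged usage sketch (editor's addition): proto-plus enums behave like
# IntEnum, so values defined in the manifest above can be read by attribute or
# by numeric value, e.g.:
#
#   err = ThirdPartyAppAnalyticsLinkErrorEnum.ThirdPartyAppAnalyticsLinkError
#   assert err.INVALID_MOBILE_APP_ID == 3
#   assert err(3).name == 'INVALID_MOBILE_APP_ID'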
|
apache-2.0
|
ocefpaf/seapy
|
docs/conf.py
|
1
|
10409
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SeaPY documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 6 11:03:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'seapy'
copyright = '2017, University of Hawaii, MIT-License'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0a1'
# The full version, including alpha/beta/rc tags.
release = '1.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SeaPYdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SeaPY.tex', 'SeaPY Documentation',
'Brian', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'seapy', 'seapy Documentation',
['Powell Lab'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SeaPY', 'seapy Documentation',
'Powell Lab', 'SeaPY', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'seapy'
epub_author = 'Powell Lab'
epub_publisher = 'Powell Lab'
epub_copyright = '2017, Powell Lab'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'seapy'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
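# Hedged usage note (editor's addition): with this conf.py in the docs/
# directory, the HTML documentation is typically built from the project root
# with something like:
#
#   sphinx-build -b html docs docs/_build/html
#
# The numpydoc and autosummary extensions listed above must be installed for
# the build to succeed.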
|
mit
|
aykol/pymatgen
|
pymatgen/io/abinit/qjobs.py
|
10
|
17260
|
# coding: utf-8
"""
Objects and methods to contact the resource manager to get info on the status of the job and useful statistics.
Note that this is not a wrapper for the C API but a collection of simple wrappers around the shell commands
provided by the resource manager (qsub, qdel and qstat for PBS, sinfo, squeue... for Slurm).
The main goal is to provide a simplified common interface for different resource managers without
having to rely on external libraries.
"""
from __future__ import print_function, division, unicode_literals
import shlex
from collections import OrderedDict, defaultdict
from subprocess import Popen, PIPE
from monty.collections import AttrDict
from monty.inspect import all_subclasses
import logging
logger = logging.getLogger(__name__)
class JobStatus(int):
"""
This object is an integer representing the status of a :class:`QueueJob`.
Slurm API, see `man squeue`.
JOB STATE CODES
Jobs typically pass through several states in the course of their execution. The typical states are
PENDING, RUNNING, SUSPENDED, COMPLETING, and COMPLETED. An explanation of each state follows.
BF BOOT_FAIL Job terminated due to launch failure, typically due to a hardware failure (e.g.
unable to boot the node or block and the job can not be requeued).
CA CANCELLED Job was explicitly cancelled by the user or system administrator.
The job may or may not have been initiated.
CD COMPLETED Job has terminated all processes on all nodes.
CF CONFIGURING Job has been allocated resources, but are waiting for them to become ready for use (e.g. booting).
CG COMPLETING Job is in the process of completing. Some processes on some nodes may still be active.
F FAILED Job terminated with non-zero exit code or other failure condition.
NF NODE_FAIL Job terminated due to failure of one or more allocated nodes.
PD PENDING Job is awaiting resource allocation.
PR PREEMPTED Job terminated due to preemption.
R RUNNING Job currently has an allocation.
S SUSPENDED Job has an allocation, but execution has been suspended.
TO TIMEOUT Job terminated upon reaching its time limit.
SE SPECIAL_EXIT The job was requeued in a special state. This state can be set by users, typically
in EpilogSlurmctld, if the job has terminated with a particular exit value.
"""
_STATUS_TABLE = OrderedDict([
(-1, "UNKNOWN"),
(0, "PENDING"),
(1, "RUNNING"),
(2, "RESIZING"),
(3, "SUSPENDED"),
(4, "COMPLETED"),
(5, "CANCELLED"),
(6, "FAILED"),
(7, "TIMEOUT"),
(8, "PREEMPTED"),
(9, "NODEFAIL"),
])
def __repr__(self):
return "<%s: %s, at %s>" % (self.__class__.__name__, str(self), id(self))
def __str__(self):
"""String representation."""
return self._STATUS_TABLE[self]
@classmethod
def from_string(cls, s):
"""Return a :class:`JobStatus` instance from its string representation."""
for num, text in cls._STATUS_TABLE.items():
if text == s: return cls(num)
else:
#raise ValueError("Wrong string %s" % s)
logger.warning("Got unknown status: %s" % s)
return cls.from_string("UNKNOWN")
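# Hedged example (editor's addition): JobStatus.from_string("RUNNING") == 1 and
# str(JobStatus(1)) == "RUNNING"; any string not in _STATUS_TABLE logs a warning
# and falls back to the UNKNOWN (-1) status instead of raising.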
class QueueJob(object):
"""
This object provides methods to contact the resource manager to get info on the status
of the job and useful statistics. This is an abstract class.
"""
QTYPE = None
# Used to handle other resource managers.
S_UNKNOWN = JobStatus.from_string("UNKNOWN")
# Slurm status
S_PENDING = JobStatus.from_string("PENDING")
S_RUNNING = JobStatus.from_string("RUNNING")
S_RESIZING = JobStatus.from_string("RESIZING")
S_SUSPENDED = JobStatus.from_string("SUSPENDED")
S_COMPLETED = JobStatus.from_string("COMPLETED")
S_CANCELLED = JobStatus.from_string("CANCELLED")
S_FAILED = JobStatus.from_string("FAILED")
S_TIMEOUT = JobStatus.from_string("TIMEOUT")
S_PREEMPTED = JobStatus.from_string("PREEMPTED")
S_NODEFAIL = JobStatus.from_string("NODEFAIL")
@staticmethod
def from_qtype_and_id(qtype, queue_id, qname=None):
"""
Return a new instance of the appropriate subclass.
Args:
qtype: String specifying the Resource manager type.
queue_id: Job identifier.
qname: Name of the queue (optional).
"""
for cls in all_subclasses(QueueJob):
if cls.QTYPE == qtype: break
else:
logger.critical("Cannot find QueueJob subclass registered for qtype %s" % qtype)
cls = QueueJob
return cls(queue_id, qname=qname)
def __init__(self, queue_id, qname="UnknownQueue"):
"""
Args:
queue_id: Job identifier.
qname: Name of the queue (optional).
"""
self.qid, self.qname = queue_id, qname
# Initialize properties.
self.status, self.exitcode, self.signal = None, None, None
def __repr__(self):
return "<%s, qid=%s, status=%s, exit_code=%s>" % (
self.__class__.__name__, self.qid, self.status, self.exitcode)
def __bool__(self):
return self.qid is not None
__nonzero__ = __bool__
#In many cases, we only need to know if job is terminated or not
#def is_terminated()
@property
def is_completed(self):
return self.status == self.S_COMPLETED
@property
def is_running(self):
return self.status == self.S_RUNNING
@property
def is_failed(self):
return self.status == self.S_FAILED
@property
def timeout(self):
return self.status == self.S_TIMEOUT
@property
def has_node_failures(self):
return self.status == self.S_NODEFAIL
@property
def unknown_status(self):
return self.status == self.S_UNKNOWN
def set_status_exitcode_signal(self, status, exitcode, signal):
self.status, self.exitcode, self.signal = status, exitcode, signal
def likely_code_error(self):
"""
See http://man7.org/linux/man-pages/man7/signal.7.html
SIGHUP 1 Term Hangup detected on controlling terminal or death of controlling process
SIGINT 2 Term Interrupt from keyboard
SIGQUIT 3 Core Quit from keyboard
SIGILL 4 Core Illegal Instruction
SIGABRT 6 Core Abort signal from abort(3)
SIGFPE 8 Core Floating point exception
SIGKILL 9 Term Kill signal
SIGSEGV 11 Core Invalid memory reference
SIGPIPE 13 Term Broken pipe: write to pipe with no readers
SIGALRM 14 Term Timer signal from alarm(2)
SIGTERM 15 Term Termination signal
SIGUSR1 30,10,16 Term User-defined signal 1
SIGUSR2 31,12,17 Term User-defined signal 2
SIGCHLD 20,17,18 Ign Child stopped or terminated
SIGCONT 19,18,25 Cont Continue if stopped
SIGSTOP 17,19,23 Stop Stop process
SIGTSTP 18,20,24 Stop Stop typed at terminal
SIGTTIN 21,21,26 Stop Terminal input for background process
SIGTTOU 22,22,27 Stop Terminal output for background process
The signals SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
Next the signals not in the POSIX.1-1990 standard but described in
SUSv2 and POSIX.1-2001.
Signal Value Action Comment
SIGBUS 10,7,10 Core Bus error (bad memory access)
SIGPOLL Term Pollable event (Sys V).
Synonym for SIGIO
SIGPROF 27,27,29 Term Profiling timer expired
SIGSYS 12,31,12 Core Bad argument to routine (SVr4)
SIGTRAP 5 Core Trace/breakpoint trap
SIGURG 16,23,21 Ign Urgent condition on socket (4.2BSD)
SIGVTALRM 26,26,28 Term Virtual alarm clock (4.2BSD)
SIGXCPU 24,24,30 Core CPU time limit exceeded (4.2BSD)
SIGXFSZ 25,25,31 Core File size limit exceeded (4.2BSD)
"""
for sig_name in ("SIGFPE",):
if self.received_signal(sig_name): return sig_name
return False
def received_signal(self, sig_name):
if self.signal is None: return False
# Get the numeric value from signal and compare it with self.signal
import signal
try:
return self.signal == getattr(signal, sig_name)
except AttributeError:
# invalid sig_name or sig_name not available on this OS.
return False
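# Added illustration: on a typical Linux host signal.SIGFPE == 8 and
# signal.SIGSEGV == 11, so a job whose scheduler reported signal 8 makes
# received_signal("SIGFPE") return True and likely_code_error() return "SIGFPE".
# The numeric values are platform-dependent, which is why the lookup above goes
# through the signal module rather than hard-coding numbers.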
def estimated_start_time(self):
"""Return date with estimated start time. None if it cannot be detected"""
return None
def get_info(self, **kwargs):
return None
def get_nodes(self, **kwargs):
return None
def get_stats(self, **kwargs):
return None
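# Hedged usage sketch (added for illustration; not part of the original module):
# shows how the qtype string dispatches to one of the QueueJob subclasses defined
# below. The qtype "slurm", the job id and the queue name are example values taken
# from the comments elsewhere in this file; the call relies on the module's
# all_subclasses() helper referenced in from_qtype_and_id.
def _example_queuejob_dispatch():
    """Return a SlurmJob wrapper for an example job id (illustration only)."""
    job = QueueJob.from_qtype_and_id("slurm", 116791, qname="defq")
    # An unrecognized qtype falls back to the plain QueueJob base class
    # (after logging a critical message).
    return job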
class ShellJob(QueueJob):
"""Handler for Shell jobs."""
QTYPE = "shell"
class SlurmJob(QueueJob):
"""Handler for Slurm jobs."""
QTYPE = "slurm"
def estimated_start_time(self):
#squeue --start -j 116791
# JOBID PARTITION NAME USER ST START_TIME NODES NODELIST(REASON)
# 116791 defq gs6q2wop username PD 2014-11-04T09:27:15 16 (QOSResourceLimit)
cmd = "squeue" "--start", "--job %d" % self.qid
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode != 0:
logger.critical(err)
return None
lines = out.splitlines()
if len(lines) <= 2: return None
from datetime import datetime
for line in lines:
    tokens = line.split()
    try:
        if int(tokens[0]) != self.qid:
            continue
    except (ValueError, IndexError):
        # Skip the header and any malformed lines.
        continue
    date_string = tokens[5]
    if date_string == "N/A": return None
    return datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S")
return None
def get_info(self, **kwargs):
# See https://computing.llnl.gov/linux/slurm/sacct.html
#If SLURM job ids are reset, some job numbers will
#probably appear more than once, referring to different jobs.
#Without this option only the most recent jobs will be displayed.
#state Displays the job status, or state.
#Output can be RUNNING, RESIZING, SUSPENDED, COMPLETED, CANCELLED, FAILED, TIMEOUT,
#PREEMPTED or NODE_FAIL. If more information is available on the job state than will fit
#into the current field width (for example, the uid that CANCELLED a job) the state will be followed by a "+".
#gmatteo@master2:~
#sacct --job 112367 --format=jobid,exitcode,state --allocations --parsable2
#JobID|ExitCode|State
#112367|0:0|RUNNING
#scontrol show job 800197 --oneliner
# For more info
#login1$ scontrol show job 1676354
#cmd = "sacct --job %i --format=jobid,exitcode,state --allocations --parsable2" % self.qid
cmd = "scontrol show job %i --oneliner" % self.qid
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode != 0:
logger.critical(err)
return None
# With --oneliner, scontrol prints a single line of space-separated
# Key=Value tokens, so split on whitespace and then split each token
# on the first "=".
tokens = out.split()
info = AttrDict()
for tok in tokens:
    if "=" not in tok:
        continue
    k, _, v = tok.partition("=")
    info[k] = v
#print(info)
qid = int(info.JobId)
assert qid == self.qid
exitcode = info.ExitCode
status = info.JobState
if ":" in exitcode:
exitcode, signal = map(int, exitcode.split(":"))
else:
exitcode, signal = int(exitcode), None
i = status.find("+")
if i != -1: status = status[:i]
self.set_status_exitcode_signal(JobStatus.from_string(status), exitcode, signal)
return AttrDict(exitcode=exitcode, signal=signal, status=status)
def get_stats(self, **kwargs):
cmd = "sacct --long --job %s --parsable2" % self.qid
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode != 0:
logger.critical(err)
return {}
lines = out.splitlines()
keys = lines[0].strip().split("|")
values = lines[1].strip().split("|")
#print("lines0", lines[0])
return dict(zip(keys, values))
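# Hedged illustration (added; not part of the original source): with --parsable2,
# sacct prints pipe-delimited output, a header line followed by one line per
# allocation, which is what SlurmJob.get_stats() zips into a dict. The sample
# string reuses the example shown in the get_info() comments above.
def _example_parse_sacct_parsable2():
    sample = "JobID|ExitCode|State\n112367|0:0|RUNNING"
    lines = sample.splitlines()
    keys = lines[0].strip().split("|")
    values = lines[1].strip().split("|")
    # -> {'JobID': '112367', 'ExitCode': '0:0', 'State': 'RUNNING'}
    return dict(zip(keys, values))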
class PbsProJob(QueueJob):
"""
Handler for PbsPro Jobs.
See also https://github.com/plediii/pbs_util for a similar project.
"""
QTYPE = "pbspro"
# Mapping PbsPro --> Slurm. From `man qstat`
#
# S The job’s state:
# B Array job has at least one subjob running.
# E Job is exiting after having run.
# F Job is finished.
# H Job is held.
# M Job was moved to another server.
# Q Job is queued.
# R Job is running.
# S Job is suspended.
# T Job is being moved to new location.
# U Cycle-harvesting job is suspended due to keyboard activity.
# W Job is waiting for its submitter-assigned start time to be reached.
# X Subjob has completed execution or has been deleted.
PBSSTAT_TO_SLURM = defaultdict(lambda: QueueJob.S_UNKNOWN, [
("E", QueueJob.S_FAILED),
("F", QueueJob.S_COMPLETED),
("Q", QueueJob.S_PENDING),
("R", QueueJob.S_RUNNING),
("S", QueueJob.S_SUSPENDED),
])
def estimated_start_time(self):
# qstat -T - Shows the estimated start time for all jobs in the queue.
# Est
# Req'd Req'd Start
#Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
#--------------- -------- -------- ---------- ------ --- --- ------ ----- - -----
#5669001.frontal username large gs.Pt -- 96 96 -- 03:00 Q --
cmd = "qstat %s -T" % self.qid
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode != 0:
logger.critical(err)
return None
line = out.splitlines()[-1]
sdate = line.split()[-1]
if sdate in ("--", "?"):
return None
# TODO One should convert to datetime
return sdate
def get_info(self, **kwargs):
# See also qstat -f
#http://sc.tamu.edu/help/origins/batch.shtml#qstat
#$> qstat 5666289
#frontal1:
# Req'd Req'd Elap
#Job ID Username Queue Jobname SessID NDS TSK Memory Time S Time
#--------------- -------- -------- ---------- ------ --- --- ------ ----- - -----
#5666289.frontal username main_ivy MorfeoTChk 57546 1 4 -- 08:00 R 00:17
cmd = "qstat %d" % self.qid
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode != 0:
# qstat: 5904257.frontal1 Job has finished, use -x or -H to obtain historical job information\n
cmd = "qstat %d -x" % self.qid
process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode != 0:
logger.critical(out)
logger.critical(err)
return None
# Here I don't know what's happening, but the output differs from the one obtained in the terminal.
# Job id Name User Time Use S Queue
# ---------------- ---------------- ---------------- -------- - -----
# 5905011.frontal1 t0 gmatteo 01:37:08 F main_wes
#print(out)
line = out.splitlines()[-1]
#print(line.split())
status = self.PBSSTAT_TO_SLURM[line.split()[4]]
# Exit code and signal are not available.
# Once could use tracejob....
# See also http://docs.adaptivecomputing.com/torque/3-0-5/a.gprologueepilogue.php
self.set_status_exitcode_signal(status, None, None)
#################################
# Unsupported resource managers #
#################################
class TorqueJob(QueueJob):
"""Not supported"""
QTYPE = "torque"
class SgeJob(QueueJob):
"""Not supported"""
QTYPE = "sge"
class MoabJob(QueueJob):
"""Not supported"""
QTYPE = "moab"
class BlueGeneJob(QueueJob):
"""Not supported"""
QTYPE = "bluegene"
|
mit
|
TanUkkii007/openjtalk-test
|
gtest-1.7.0/test/gtest_xml_test_utils.py
|
1815
|
8876
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = '[email protected] (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which needs only be
convertible to a floating-point number and any attribute named
"type_param" which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
|
mit
|
django-nonrel/django
|
tests/null_fk_ordering/tests.py
|
150
|
2069
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Author, Article, SystemInfo, Forum, Post, Comment
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
article_1 = Article.objects.create(title='No author on this article')
article_2 = Article.objects.create(author=author_1, title='This article written by Tom Jones')
article_3 = Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertTrue(len(list(Article.objects.all())) == 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
c1 = Comment.objects.create(post=p, comment_text='My first comment')
c2 = Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
c3 = Comment.objects.create(comment_text='Another first comment')
c4 = Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertTrue(len(list(Comment.objects.all())) == 4)
|
bsd-3-clause
|
kakunbsc/enigma2.2
|
lib/python/Components/Converter/ConfigEntryTest.py
|
27
|
1565
|
from Converter import Converter
from Components.Element import cached
from Components.config import configfile
class ConfigEntryTest(Converter, object):
def __init__(self, argstr):
Converter.__init__(self, argstr)
args = argstr.split(',')
self.argerror = False
self.checkSourceBoolean = False
self.invert = False
self.configKey = None
self.configValue = None
if len(args) < 2:
self.argerror = True
else:
if args[0].find("config.") != -1:
self.configKey = args[0]
self.configValue = args[1]
if len(args) > 2:
if args[2] == 'Invert':
self.invert = True
elif args[2] == 'CheckSourceBoolean':
self.checkSourceBoolean = True
else:
self.argerror = True
if len(args) > 3:
if args[3] == 'Invert':
self.invert = True
elif args[3] == 'CheckSourceBoolean':
self.checkSourceBoolean = True
else:
self.argerror = True
else:
self.argerror = True
if self.argerror:
print "ConfigEntryTest Converter got incorrect arguments", args, "!!!\narg[0] must start with 'config.',\narg[1] is the compare string,\narg[2],arg[3] are optional arguments and must be 'Invert' or 'CheckSourceBoolean'"
@cached
def getBoolean(self):
if self.argerror:
print "ConfigEntryTest got invalid arguments", self.converter_arguments, "force True!!"
return True
if self.checkSourceBoolean and not self.source.boolean:
return False
val = configfile.getResolvedKey(self.configKey)
ret = val == self.configValue
return ret ^ self.invert
boolean = property(getBoolean)
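# Hedged illustration (added; not part of the original converter and independent of
# the enigma2 runtime): a standalone sketch of the argument string this converter
# expects, e.g. "config.usage.setup_level,expert,Invert" -- arg[0] must start with
# "config.", arg[1] is the compare value, and the optional arg[2]/arg[3] may each be
# "Invert" or "CheckSourceBoolean". The config key used here is only an example.
def _example_parse_args(argstr="config.usage.setup_level,expert,Invert"):
    args = argstr.split(',')
    return {
        "configKey": args[0],
        "configValue": args[1],
        "invert": "Invert" in args[2:],
        "checkSourceBoolean": "CheckSourceBoolean" in args[2:],
    }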
|
gpl-2.0
|
petebachant/scipy
|
scipy/stats/mstats.py
|
37
|
1508
|
"""
===================================================================
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
===================================================================
.. currentmodule:: scipy.stats.mstats
This module contains a large number of statistical functions that can
be used with masked arrays.
Most of these functions are similar to those in scipy.stats but might
have small differences in the API or in the algorithm used. Since this
is a relatively new package, some API changes are still possible.
.. autosummary::
:toctree: generated/
argstoarray
betai
chisquare
count_tied_groups
describe
f_oneway
f_value_wilks_lambda
find_repeats
friedmanchisquare
kendalltau
kendalltau_seasonal
kruskalwallis
ks_twosamp
kurtosis
kurtosistest
linregress
mannwhitneyu
plotting_positions
mode
moment
mquantiles
msign
normaltest
obrientransform
pearsonr
plotting_positions
pointbiserialr
rankdata
scoreatpercentile
sem
signaltonoise
skew
skewtest
spearmanr
theilslopes
threshold
tmax
tmean
tmin
trim
trima
trimboth
trimmed_stde
trimr
trimtail
tsem
ttest_onesamp
ttest_ind
ttest_onesamp
ttest_rel
tvar
variation
winsorize
zmap
zscore
"""
from __future__ import division, print_function, absolute_import
from .mstats_basic import *
from .mstats_extras import *
from scipy.stats import gmean, hmean
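# Hedged usage sketch (added; not part of the upstream module): the functions listed
# in the docstring above accept masked arrays, so invalid entries can simply be
# masked out before a statistic is computed. The sample values are illustrative.
def _example_masked_describe():
    import numpy as np
    data = np.ma.masked_invalid([1.0, 2.0, np.nan, 4.0])
    # Statistics are computed over the three unmasked values only.
    return describe(data)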
|
bsd-3-clause
|
69495/Zooarchaeologist
|
mstdn/mining.py
|
1
|
1205
|
from mastodon import Mastodon
import json
from login import login
from out_json import jsoner
'''
return_type is either "json" or "list".
The default ("list") returns a Python list.
'''
def mining(id, return_type="list", switch=None):
print(return_type + " is selected!")
Mastodon = login(switch)
# Fetch the latest max_id from the local timeline
tl = Mastodon.timeline_local(limit=1)
initial_max_id = tl[0]['id']
toot = Mastodon.account_statuses(id, initial_max_id, None, 40)
while True:
last_max_id = toot[len(toot)-1]['id']
# Fetch the next batch of toots
last_toot = Mastodon.account_statuses(id, last_max_id, None, 40)
toot.extend(last_toot)
# final_max_lenge = len(toot)-1
final_max_lenge = len(last_toot) -1
# account = Mastodon.account(id)
# count = account['statuses_count']
toot_count = toot[0]['account']['statuses_count']
print(str(len(toot)) + '/' + str(toot_count))
if final_max_lenge < 39:
break
if return_type == "json":
filename = str(id)
jsoner(toot,filename)
else:
return toot
# id = int(input())
# mining(id)
if __name__ == '__main__':
    id = int(input())
    mining(id)
|
mit
|
dragosbdi/bicreditsnew
|
qa/rpc-tests/listtransactions.py
|
4
|
4726
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(BitcreditTestFramework):
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].setgenerate(True, 1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
if __name__ == '__main__':
ListTransactionsTest().main()
|
mit
|
agileblaze/OpenStackTwoFactorAuthentication
|
openstack_dashboard/dashboards/project/databases/tabs.py
|
50
|
4310
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django import template
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.databases import tables
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
def get_context_data(self, request):
return {"instance": self.tab_group.kwargs['instance']}
def get_template_name(self, request):
instance = self.tab_group.kwargs['instance']
template_file = ('project/databases/_detail_overview_%s.html'
% instance.datastore['type'])
try:
template.loader.get_template(template_file)
return template_file
except template.TemplateDoesNotExist:
# This datastore type does not have a template file
# Just use the base template file
return ('project/databases/_detail_overview.html')
class UserTab(tabs.TableTab):
table_classes = [tables.UsersTable]
name = _("Users")
slug = "users_tab"
instance = None
template_name = "horizon/common/_detail_table.html"
preload = False
def get_users_data(self):
instance = self.tab_group.kwargs['instance']
try:
data = api.trove.users_list(self.request, instance.id)
for user in data:
user.instance = instance
user.access = api.trove.user_list_access(self.request,
instance.id,
user.name)
except Exception:
msg = _('Unable to get user data.')
exceptions.handle(self.request, msg)
data = []
return data
def allowed(self, request):
perms = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
if perms:
return request.user.has_perms(perms)
return True
class DatabaseTab(tabs.TableTab):
table_classes = [tables.DatabaseTable]
name = _("Databases")
slug = "database_tab"
instance = None
template_name = "horizon/common/_detail_table.html"
preload = False
def get_databases_data(self):
instance = self.tab_group.kwargs['instance']
try:
data = api.trove.database_list(self.request, instance.id)
add_instance = lambda d: setattr(d, 'instance', instance)
map(add_instance, data)
except Exception:
msg = _('Unable to get databases data.')
exceptions.handle(self.request, msg)
data = []
return data
def allowed(self, request):
perms = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
if perms:
return request.user.has_perms(perms)
return True
class BackupsTab(tabs.TableTab):
table_classes = [tables.InstanceBackupsTable]
name = _("Backups")
slug = "backups_tab"
instance = None
template_name = "horizon/common/_detail_table.html"
preload = False
def get_backups_data(self):
instance = self.tab_group.kwargs['instance']
try:
data = api.trove.instance_backups(self.request, instance.id)
except Exception:
msg = _('Unable to get database backup data.')
exceptions.handle(self.request, msg)
data = []
return data
def allowed(self, request):
return request.user.has_perm('openstack.services.object-store')
class InstanceDetailTabs(tabs.TabGroup):
slug = "instance_details"
tabs = (OverviewTab, UserTab, DatabaseTab, BackupsTab)
sticky = True
|
apache-2.0
|
GarySparrow/mFlaskWeb
|
venv/Lib/site-packages/sqlalchemy/orm/relationships.py
|
21
|
111121
|
# orm/relationships.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`.relationship`.
"""
from __future__ import absolute_import
from .. import sql, util, exc as sa_exc, schema, log
from .util import CascadeOptions, _orm_annotate, _orm_deannotate
from . import dependency
from . import attributes
from ..sql.util import (
ClauseAdapter,
join_condition, _shallow_annotate, visit_binary_product,
_deep_deannotate, selectables_overlap
)
from ..sql import operators, expression, visitors
from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY,
StrategizedProperty, PropComparator)
from ..inspection import inspect
from . import mapper as mapperlib
import collections
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"remote": True})
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"foreign": True})
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
See also:
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = 'relationship'
_dependency_processor = None
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False, extension=None,
viewonly=False, lazy=True,
collection_class=None, passive_deletes=False,
passive_updates=True, remote_side=None,
enable_typechecks=True, join_depth=None,
comparator_factory=None,
single_parent=False, innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
strategy_class=None, _local_remote_pairs=None,
query_class=None,
info=None):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`.relationship` optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need to import related classes at all into the local module space::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
a mapped class, or actual :class:`.Mapper` instance, representing
the target of the relationship.
:paramref:`~.relationship.argument` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`.Table`.
In less common circumstances, the argument may also be specified
as an :class:`.Alias` construct, or even a :class:`.Join` construct.
:paramref:`~.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`.Table` that is
present in the :class:`.MetaData` collection associated with the
parent-mapped :class:`.Table`.
The :paramref:`~.relationship.secondary` keyword argument is
typically applied in the case where the intermediary :class:`.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`~.relationship.viewonly` flag so that this
:func:`.relationship` is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`~.relationship.secondary` when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works
more effectively when referring to a :class:`.Join` instance.
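As a minimal sketch (the table and class names below are placeholders
chosen for illustration only), a many-to-many configuration typically
pairs a plain :class:`.Table` with
:paramref:`~.relationship.secondary`::

    association = Table(
        'association', Base.metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id'))
    )

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", secondary=association)

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)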
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`.relationship`
configuration when using :paramref:`~.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`~.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`~.relationship.back_populates` to this relationship to
ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.backref` - alternative form
of backref specification.
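A minimal sketch (class names are illustrative) of two mappings linked
explicitly with :paramref:`~.relationship.back_populates` on both
sides::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", back_populates="parent")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        parent = relationship("Parent", back_populates="children")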
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
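For example (a sketch only; the attribute name is illustrative), a
collection whose members should be deleted together with their parent,
and never orphaned from it, is commonly declared as::

    children = relationship("Child", cascade="all, delete-orphan")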
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`~.relationship.cascade_backrefs` option is used.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
a class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT is
reducing performance of the innermost subquery beyond that of what
duplicate innermost rows may be causing.
.. versionadded:: 0.8.3 -
:paramref:`~.relationship.distinct_target_key` allows the
subquery eager loader to apply a DISTINCT modifier to the
innermost SELECT.
.. versionchanged:: 0.9.0 -
:paramref:`~.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
.. deprecated:: 0.7 Please see :class:`.AttributeEvents`.
:param foreign_keys:
a list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`.relationship`
object's :paramref:`~.relationship.primaryjoin` condition.
That is, if the :paramref:`~.relationship.primaryjoin`
condition of this :func:`.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`.relationship` is ``b.a_id``.
In normal cases, the :paramref:`~.relationship.foreign_keys`
parameter is **not required.** :func:`.relationship` will
automatically determine which columns in the
:paramref:`~.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`.Column` objects that specify :class:`.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`.ForeignKeyConstraint` construct.
:paramref:`~.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`.relationship` to consider just those columns specified
here as "foreign".
.. versionchanged:: 0.8
A multiple-foreign key join ambiguity can be resolved by
setting the :paramref:`~.relationship.foreign_keys`
parameter alone, without the need to explicitly set
:paramref:`~.relationship.primaryjoin` as well.
2. The :class:`.Table` being mapped does not actually have
:class:`.ForeignKey` or :class:`.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`~.relationship.primaryjoin` argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`~.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`.relationship` doesn't raise any exceptions, the
:paramref:`~.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`~.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`~.relationship.primaryjoin` condition.
.. versionadded:: 0.8
The :func:`.foreign` annotation can also be applied
directly to the :paramref:`~.relationship.primaryjoin`
expression, which is an alternate, more specific system of
describing which columns in a particular
:paramref:`~.relationship.primaryjoin` should be considered
"foreign".
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have one or at least one entry.
If the joined-eager load is chained onto an existing LEFT OUTER
JOIN, ``innerjoin=True`` will be bypassed and the join will continue
to chain as LEFT OUTER JOIN so that the results don't change. As an
alternative, specify the value ``"nested"``. This will instead nest
the join on the right side, e.g. using the form "a LEFT OUTER JOIN
(b JOIN c)".
.. versionadded:: 0.9.4 Added ``innerjoin="nested"`` option to
support nesting of eager "inner" joins.
.. seealso::
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:paramref:`.joinedload.innerjoin` - loader option version
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`~.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``dynamic`` - the attribute will return a pre-configured
:class:`.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on relationship loader
configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
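As an illustration of the values above (attribute and class names are
placeholders)::

    # eager load via a JOIN in the same SELECT as the parent
    addresses = relationship("Address", lazy="joined")

    # return a pre-configured Query for further filtering
    addresses = relationship("Address", lazy="dynamic")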
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`~.relationship.load_on_pending` flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
indicates the ordering that should be applied when loading these
items. :paramref:`~.relationship.order_by` is expected to refer to
one of the :class:`.Column` objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`~.relationship.order_by` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when there
is no delete or delete-orphan cascade enabled. This is
typically used when a triggering or error raise scenario is in
place on the database side. Note that the foreign key
attributes on in-session child objects will not be changed
after a flush occurs so this is a very special use-case
setting.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates loading and INSERT/UPDATE/DELETE behavior when the
source of a foreign key value changes (i.e. an "on update"
cascade), which are typically the primary key columns of the
source row.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. Note that with databases which enforce
referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
ON UPDATE CASCADE is required for this operation. The
relationship() will update the value of the attribute on related
items which are locally present in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE
(i.e. SQLite, MySQL MyISAM tables).
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`~.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
a SQL expression that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`~.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. versionchanged:: 0.8
The :func:`.remote` annotation can also be applied
directly to the ``primaryjoin`` expression, which is an
alternate, more specific system of describing which columns in a
particular ``primaryjoin`` should be considered "remote".
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`~.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`~.relationship.remote_side`, typically
when a custom :paramref:`~.relationship.primaryjoin` condition
is used.
:param query_class:
a :class:`.Query` subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`.orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
a SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
:paramref:`~.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`.relationship` construct itself will raise an error
instructing when this option is required.
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`~.relationship.single_parent` flag may be appropriate.
:param uselist:
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`~.relationship.uselist` to
False.
The :paramref:`~.relationship.uselist` flag is also available on an
existing :func:`.relationship` construct as a read-only attribute,
which can be used to determine if this :func:`.relationship` deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`~.relationship.uselist` flag is needed.
:param viewonly=False:
when set to True, the relationship is used only for loading objects,
and not for any persistence operation. A :func:`.relationship`
which specifies :paramref:`~.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`~.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`~.sql.expression.cast`. The
:paramref:`~.relationship.viewonly` flag is also of general use when
defining any kind of :func:`~.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
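A possible use, sketched with hypothetical names, is a read-only
collection whose join condition selects only a subset of related
rows::

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)

        # loaded for reading only; changes to this collection
        # are never flushed
        gmail_addresses = relationship(
            "Address",
            primaryjoin="and_(User.id == Address.user_id, "
            "Address.email.like('%@gmail.com'))",
            viewonly=True)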
"""
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
if strategy_class:
self.strategy_class = strategy_class
else:
self.strategy_class = self._strategy_lookup(("lazy", self.lazy))
self._reverse_property = set()
self.cascade = cascade if cascade is not False \
else "save-update, merge"
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive")
self.backref = None
else:
self.backref = backref
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
See also:
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(
self, prop, parentmapper, adapt_to_entity=None, of_type=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parentmapper = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(self.property, self._parentmapper,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type)
@util.memoized_property
def mapper(self):
"""The target :class:`.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type = inspect(self._of_type).mapper
else:
of_type = None
pj, sj, source, dest, \
secondary, target_adapter = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type=of_type)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
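E.g., assuming a polymorphic ``Employee`` hierarchy with an
``Engineer`` subclass related via ``Company.employees``
(illustrative names only)::

    session.query(Company).\
        join(Company.employees.of_type(Engineer)).\
        filter(Engineer.primary_language == 'python')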
"""
return RelationshipProperty.Comparator(
self.property,
self._parentmapper,
adapt_to_entity=self._adapt_to_entity,
of_type=cls)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple '
'many-to-one, use in_() against '
'the set of foreign key values.')
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
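E.g., given a hypothetical one-to-many ``User.addresses``
collection, comparing against ``None`` renders a NOT EXISTS::

    session.query(User).filter(User.addresses == None)

    # SELECT ... FROM "user" WHERE NOT (EXISTS
    #     (SELECT 1 FROM address
    #      WHERE "user".id = address.user_id))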
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(self.property._optimized_compare(
None, adapt_source=self.adapter))
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership.")
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter))
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, '_of_type', None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = \
info.mapper, info.selectable, info.is_aliased_class
if self.property._is_self_referential and not \
is_aliased_class:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
pj, sj, source, dest, secondary, target_adapter = \
self.property._create_joins(
dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if criterion is not None and target_adapter and not \
is_aliased_class:
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{'no_replacement_traverse': True})
crit = j & sql.True_._ifnone(criterion)
ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest)
if secondary is not None:
ex = ex.correlate_except(secondary)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. "
"Use any().")
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use ==")
clause = self.property._optimized_compare(
other, adapt_source=self.adapter)
if self.property.secondaryjoin is not None:
clause.negation_clause = \
self.__negated_contains_or_equals(other)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
o = state.obj() # strong ref
return sql.bindparam(
x, unique=True, callable_=lambda:
self.property.mapper.
_get_committed_attr_by_column(o, col))
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(*[
sql.or_(
adapt(x) != state_bindparam(adapt(x), state, y),
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
criterion = sql.and_(*[x == y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.
mapper.
primary_key_from_instance(other))
])
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`~.expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(~self.property._optimized_compare(
None, adapt_source=self.adapter))
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership.")
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def compare(self, op, value,
value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
return self._optimized_compare(
None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return self._optimized_compare(
value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
return self._lazy_strategy.lazy_clause(
value,
reverse_direction=not value_is_parent,
alias_secondary=alias_secondary,
adapt_source=adapt_source)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load, _recursive):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).\
get(source_state, source_dict)
if hasattr(instances, '_sa_adapter'):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(current_state, current_dict,
load=load, _recursive=_recursive)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(dest_state,
dest_dict, self.key)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key)._set_iterable(
dest_state, dest_dict, dest_list)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(current_state, current_dict,
load=load, _recursive=_recursive)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(dest_state,
dest_dict, obj, None)
def _value_as_iterable(self, state, dict_, key,
passive=attributes.PASSIVE_OFF):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, 'get_collection'):
return [
(attributes.instance_state(o), o) for o in
impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(self, type_, state, dict_,
visited_states, halt_on=None):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != 'delete' or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == 'save-update':
tuples = state.manager[self.key].impl.\
get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(state, dict_, self.key,
passive=passive)
skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
not in self._cascade
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError("Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'" % (
self.key,
self.parent.class_,
c.__class__
))
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
'reverse_property %r on '
'relationship %s references relationship %s, which '
'does not reference mapper %s' %
(key, self, other, self.parent))
if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
== other.direction:
raise sa_exc.ArgumentError(
'%s and back-reference %s are '
'both of the same direction %r. Did you mean to '
'set remote_side on the many-to-one side?' %
(other, self, self.direction))
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
if util.callable(self.argument) and \
not isinstance(self.argument, (type, mapperlib.Mapper)):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
mapper_ = mapperlib.class_mapper(argument,
configure=False)
elif isinstance(self.argument, mapperlib.Mapper):
mapper_ = argument
else:
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument)))
return mapper_
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
"""Return the selectable linked to this
:class:`.RelationshipProperty` object's target
:class:`.Mapper`.
"""
return self.target
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side',
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in 'primaryjoin', 'secondaryjoin':
val = getattr(self, attr)
if val is not None:
setattr(self, attr, _orm_deannotate(
expression._only_column_elements(val, attr))
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in
util.to_list(self.order_by)]
self._user_defined_foreign_keys = \
util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(
self._user_defined_foreign_keys
))
self.remote_side = \
util.column_set(
expression._only_column_elements(x, "remote_side")
for x in
util.to_column_set(self.remote_side))
self.target = self.mapper.mapped_table
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_selectable=self.parent.mapped_table,
child_selectable=self.mapper.mapped_table,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.mapper.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped
)
self.primaryjoin = jc.deannotated_primaryjoin
self.secondaryjoin = jc.deannotated_secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if not self.is_primary() and not mapperlib.class_mapper(
self.parent.class_,
configure=False).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' " %
(self.key, self.parent.class_.__name__,
self.parent.class_.__name__))
# check for conflicting relationship() on superclass
if not self.parent.concrete:
for inheriting in self.parent.iterate_to_root():
if inheriting is not self.parent \
and inheriting.has_property(self.key):
util.warn("Warning: relationship '%s' on mapper "
"'%s' supersedes the same relationship "
"on inherited mapper '%s'; this can "
"cause dependency issues during flush"
% (self.key, self.parent, inheriting))
def _get_cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
def _set_cascade(self, cascade):
cascade = CascadeOptions(cascade)
if 'mapper' in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
cascade = property(_get_cascade, _set_cascade)
def _check_cascade_settings(self, cascade):
if cascade.delete_orphan and not self.single_parent \
and (self.direction is MANYTOMANY or self.direction
is MANYTOONE):
raise sa_exc.ArgumentError(
'On %s, delete-orphan cascade is not supported '
'on a many-to-many or many-to-one relationship '
'when single_parent is not set. Set '
'single_parent=True on the relationship().'
% self)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn("On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only."
% self)
if self.passive_deletes == 'all' and \
("delete" in cascade or
"delete-orphan" in cascade):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.RelationshipProperty`.
"""
for c in cols:
if self.secondary is not None \
and self.secondary.c.contains_column(c):
continue
if not self.parent.mapped_table.c.contains_column(c) and \
not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`.relationship` complementary to this one."""
if not self.is_primary():
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
check = set(mapper.iterate_to_root()).\
union(mapper.self_and_descendants)
for m in check:
if m.has_property(backref_key):
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'" %
(backref_key, self, m))
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
'primaryjoin',
self._join_condition.secondaryjoin_minus_local)
sj = kwargs.pop(
'secondaryjoin',
self._join_condition.primaryjoin_minus_local)
else:
pj = kwargs.pop(
'primaryjoin',
self._join_condition.primaryjoin_reverse_remote)
sj = kwargs.pop('secondaryjoin', None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop('foreign_keys',
self._user_defined_foreign_keys)
parent = self.parent.primary_mapper()
kwargs.setdefault('viewonly', self.viewonly)
kwargs.setdefault('post_update', self.post_update)
kwargs.setdefault('passive_updates', self.passive_updates)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent, self.secondary,
pj, sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
def _post_init(self):
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = \
dependency.DependencyProcessor.from_relationship(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(self, source_polymorphic=False,
source_selectable=None, dest_polymorphic=False,
dest_selectable=None, of_type=None):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
aliased = False
if dest_selectable is None:
if dest_polymorphic and self.mapper.with_polymorphic:
dest_selectable = self.mapper._with_polymorphic_selectable
aliased = True
else:
dest_selectable = self.mapper.mapped_table
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
else:
aliased = True
dest_mapper = of_type or self.mapper
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (source_selectable is not None)
primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \
self._join_condition.join_targets(
source_selectable, dest_selectable, aliased, single_crit
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.mapper.local_table
return (primaryjoin, secondaryjoin, source_selectable,
dest_selectable, secondary, target_adapter)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
class JoinCondition(object):
def __init__(self,
parent_selectable,
child_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True
):
self.parent_selectable = parent_selectable
self.parent_local_selectable = parent_local_selectable
self.child_selectable = child_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info('%s setup primary join %s', self.prop,
self.primaryjoin)
log.info('%s setup secondary join %s', self.prop,
self.secondaryjoin)
log.info('%s synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.synchronize_pairs))
log.info('%s secondary synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.secondary_synchronize_pairs or []))
log.info('%s local/remote pairs [%s]', self.prop,
','.join('(%s / %s)' % (l, r) for (l, r) in
self.local_remote_pairs))
log.info('%s remote columns [%s]', self.prop,
','.join('%s' % col for col in self.remote_columns)
)
log.info('%s local columns [%s]', self.prop,
','.join('%s' % col for col in self.local_columns)
)
log.info('%s relationship direction %s', self.prop,
self.direction)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = \
join_condition(
self.child_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
else:
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.child_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
except sa_exc.NoForeignKeysError:
if self.secondary is not None:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary))
else:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop)
except sa_exc.AmbiguousForeignKeysError:
if self.secondary is not None:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary))
else:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin,
values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v['remote']
v['local'] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v['local']
v['remote'] = True
return element._with_annotations(v)
return visitors.replacement_traverse(
self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(self.primaryjoin,
values=("local", "remote"))
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin,
{},
check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin,
{},
check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and \
isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if "foreign" not in binary.left._annotations and \
"foreign" not in binary.right._annotations:
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate(
{"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True})
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin,
{},
{"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_selectable
mt = self.child_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause) and
isinstance(f, expression.ColumnClause) and
pt.is_derived_from(c.table) and
pt.is_derived_from(f.table) and
mt.is_derived_from(c.table) and
mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_selectable, self.child_selectable)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(lambda col: "foreign" in col._annotations, False)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and \
isinstance(binary.right, expression.ColumnClause):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate(
{"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument.")
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though it is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(binary.left,
binary.right)
binary.right, binary.left = proc_left_right(binary.right,
binary.left)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and \
isinstance(right, expression.ColumnClause):
if self.child_selectable.c.contains_column(right) and \
self.parent_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_selectable.c.contains_column(element) and \
(not self.parent_local_selectable.c.
contains_column(element) or
self.child_local_selectable.c.
contains_column(element)):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side."
% self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set([l for (l, r)
in self._local_remote_pairs])
else:
local_side = util.column_set(self.parent_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and \
elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
'Relationship %s could '
'not determine any unambiguous local/remote column '
'pairs based on join condition and remote_side '
'arguments. '
'Consider using the remote() annotation to '
'accurately mark those elements of the join '
'condition that are on the remote side of '
'the relationship.' % (self.prop, ))
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign")
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if self.support_sync and can_sync or \
(not self.support_sync and has_foreign):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = "Could not locate any simple equality expressions "\
"involving locally mapped foreign key columns for "\
"%s join condition "\
"'%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
" Ensure that referencing columns are associated "\
"with a ForeignKey or ForeignKeyConstraint, or are "\
"annotated in the join condition with the foreign() "\
"annotation. To allow comparison operators other than "\
"'==', the relationship can be marked as viewonly=True."
raise sa_exc.ArgumentError(err)
else:
err = "Could not locate any relevant foreign key columns "\
"for %s join condition '%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
' Ensure that referencing columns are associated '\
'with a ForeignKey or ForeignKeyConstraint, or are '\
'annotated in the join condition with the foreign() '\
'annotation.'
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_selectable.c)
targetcols = util.column_set(self.child_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(
self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(
self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign")
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set([c for c in
self._gather_columns_with_annotation(
self.primaryjoin,
"foreign")
if "remote" not in c._annotations])
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate())
for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if "remote" in right._annotations and \
"remote" not in left._annotations and \
self.can_be_synced_fn(left):
lrp.add((left, right))
elif "remote" in left._annotations and \
"remote" not in right._annotations and \
self.can_be_synced_fn(right):
lrp.add((right, left))
if binary.operator is operators.eq and \
self.can_be_synced_fn(left, right):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs)
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = \
self._deannotate_pairs(secondary_sync_pairs)
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
@util.memoized_property
def deannotated_primaryjoin(self):
return _deep_deannotate(self.primaryjoin)
@util.memoized_property
def deannotated_secondaryjoin(self):
if self.secondaryjoin is not None:
return _deep_deannotate(self.secondaryjoin)
else:
return None
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(
self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation)
)
return set([x._deannotate() for x in s])
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set([
col for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
])
def join_targets(self, source_selectable,
dest_selectable,
aliased,
single_crit=None):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
{'no_replacement_traverse': True})
primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
self.secondaryjoin, self.secondary
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = \
ClauseAdapter(dest_selectable,
equivalents=self.child_equivalents).\
chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = \
ClauseAdapter(secondary).\
chain(ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents))
secondaryjoin = \
secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents))
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return primaryjoin, secondaryjoin, secondary, \
target_adapter, dest_selectable
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and 'local' in col._annotations) or
reverse_direction and (
(has_secondary and col in lookup) or
(not has_secondary and 'remote' in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = dict((binds[col].key, col) for col in binds)
# this is probably not necessary
lazywhere = _deep_deannotate(lazywhere)
return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
|
mit
|
dyn888/youtube-dl
|
youtube_dl/extractor/naver.py
|
22
|
3756
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
)
class NaverIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?tvcast\.naver\.com/v/(?P<id>\d+)'
_TESTS = [{
'url': 'http://tvcast.naver.com/v/81652',
'info_dict': {
'id': '81652',
'ext': 'mp4',
'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
'upload_date': '20130903',
},
}, {
'url': 'http://tvcast.naver.com/v/395837',
'md5': '638ed4c12012c458fefcddfd01f173cd',
'info_dict': {
'id': '395837',
'ext': 'mp4',
'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
'upload_date': '20150519',
},
'skip': 'Georestricted',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
error = self._html_search_regex(
r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
query_urls = compat_urllib_parse.urlencode({
'masterVid': vid,
'protocol': 'p2p',
'inKey': key,
})
info = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query,
video_id, 'Downloading video info')
urls = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls,
video_id, 'Downloading video formats info')
formats = []
for format_el in urls.findall('EncodingOptions/EncodingOption'):
domain = format_el.find('Domain').text
uri = format_el.find('uri').text
f = {
'url': compat_urlparse.urljoin(domain, uri),
'ext': 'mp4',
'width': int(format_el.find('width').text),
'height': int(format_el.find('height').text),
}
if domain.startswith('rtmp'):
# urlparse does not support custom schemes
# https://bugs.python.org/issue18828
f.update({
'url': domain + uri,
'ext': 'flv',
'rtmp_protocol': '1', # rtmpt
})
formats.append(f)
self._sort_formats(formats)
return {
'id': video_id,
'title': info.find('Subject').text,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'upload_date': info.find('WriteDate').text.replace('.', ''),
'view_count': int(info.find('PlayCount').text),
}
|
unlicense
|
SlimRoms/kernel_sony_msm8x60
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
goliveirab/odoo
|
addons/l10n_ca/__init__.py
|
438
|
1056
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
solashirai/edx-platform
|
cms/djangoapps/contentstore/features/courses.py
|
171
|
2135
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from lettuce import world, step
from common import *
############### ACTIONS ####################
@step('There are no courses$')
def no_courses(step):
world.clear_courses()
create_studio_user()
@step('I click the New Course button$')
def i_click_new_course(step):
world.css_click('.new-course-button')
@step('I fill in the new course information$')
def i_fill_in_a_new_course_information(step):
fill_in_course_info()
@step('I create a course with "([^"]*)", "([^"]*)", "([^"]*)", and "([^"]*)"')
def i_create_course(step, name, org, number, run):
fill_in_course_info(name=name, org=org, num=number, run=run)
@step('I create a new course$')
def i_create_a_course(step):
create_a_course()
@step('I click the course link in Studio Home$')
def i_click_the_course_link_in_studio_home(step): # pylint: disable=invalid-name
course_css = 'a.course-link'
world.css_click(course_css)
@step('I see an error about the length of the org/course/run tuple')
def i_see_error_about_length(step):
assert world.css_has_text(
'#course_creation_error',
'The combined length of the organization, course number, '
'and course run fields cannot be more than 65 characters.'
)
############ ASSERTIONS ###################
@step('the Courseware page has loaded in Studio$')
def courseware_page_has_loaded_in_studio(step):
course_title_css = 'span.course-title'
assert world.is_css_present(course_title_css)
@step('I see the course listed in Studio Home$')
def i_see_the_course_in_studio_home(step):
course_css = 'h3.class-title'
assert world.css_has_text(course_css, world.scenario_dict['COURSE'].display_name)
@step('I am on the "([^"]*)" tab$')
def i_am_on_tab(step, tab_name):
header_css = 'div.inner-wrapper h1'
assert world.css_has_text(header_css, tab_name)
@step('I see a link for adding a new section$')
def i_see_new_section_link(step):
link_css = '.outline .button-new'
assert world.css_has_text(link_css, 'New Section')
|
agpl-3.0
|
mathn/jubatus
|
client_test/test_gtest.py
|
3
|
3474
|
# -*- coding: utf-8 -*-
import os
import re
from cStringIO import StringIO
import time
from jubatest import *
from jubatest.unit import JubaSkipTest
from jubatest.remote import SyncRemoteProcess
from jubatest.process import LocalSubprocess
from jubatest.logger import log
class ClientGoogleTestBase():
@classmethod
def find_testcases(cls, test_program):
# Collect all test cases
args = [ test_program, '--gtest_list_tests' ]
proc = LocalSubprocess(args)
proc.start()
returncode = proc.wait()
if returncode != 0:
raise JubaSkipTest('%s cannot list testcases' % test_program)
# read input
stri = StringIO(proc.stdout)
testcases = []
current_test = None
re_test = re.compile('^([a-zA-Z0-9_]+\.)')
re_testcase = re.compile('^ ([a-zA-Z0-9_]+)')
while True:
line = stri.readline()
if line == '': break
if line.find('Running main') != -1: continue
match = re_test.match(line)
if match:
current_test = match.group(1)
match = re_testcase.match(line)
if match and current_test:
testcases.append('%s%s' % (current_test, match.group(1)))
return testcases
@classmethod
def setUpCluster(cls, env):
cls.env = env
@classmethod
def generateTests(cls, env):
if env.get_param('CPP_GTEST') is None:
raise JubaSkipTest('CPP_GTEST parameter is not set')
for service, test_program in env.get_param('CPP_GTEST').items():
for test in cls.find_testcases(test_program):
yield cls.gtest, service, test_program, test
def gtest(self, service, test_program, test):
self.lazySetUp(service)
args = [ test_program, '--gtest_filter=%s' % test ]
env = { 'JUBATUS_HOST': self.client_node.get_host(),
'JUBATUS_PORT': str(self.target.get_host_port()[1]),
'JUBATUS_CLUSTER_NAME': self.name }
env.update(os.environ)
proc = LocalSubprocess(args, env)
proc.start()
returncode = proc.wait()
# Report gtest result when an error occurred
self.assertEqual(0, returncode, proc.stdout)
class ClientStandaloneTest(JubaTestCase, ClientGoogleTestBase):
def lazySetUp(self, service):
self.server1 = self.env.server_standalone(self.env.get_node(0), service, default_config(service))
self.target = self.server1
self.name = ''
self.client_node = self.env.get_node(0)
self.server1.start()
def tearDown(self):
self.server1.stop()
class ClientDistributedTest(JubaTestCase, ClientGoogleTestBase):
def lazySetUp(self, service):
self.node0 = self.env.get_node(0)
self.cluster = self.env.cluster(service, default_config(service))
self.name = self.cluster.name
self.server1 = self.env.server(self.node0, self.cluster)
self.server2 = self.env.server(self.node0, self.cluster)
self.keeper1 = self.env.keeper(self.node0, service)
self.target = self.keeper1
self.client_node = self.env.get_node(0)
for server in [self.keeper1, self.server1, self.server2]:
server.start()
self.keeper1.wait_for_servers(self.server1, self.server2)
def tearDown(self):
for server in [self.keeper1, self.server1, self.server2]:
server.stop()
|
lgpl-2.1
|
ruslanloman/nova
|
nova/tests/unit/virt/libvirt/test_blockinfo.py
|
7
|
46217
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nova import block_device
from nova.compute import arch
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
import nova.tests.unit.image.fake
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt.libvirt import blockinfo
class LibvirtBlockInfoTest(test.NoDBTestCase):
def setUp(self):
super(LibvirtBlockInfoTest, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
self.test_instance = {
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': 2, # m1.tiny
'config_drive': None,
'system_metadata': {},
}
flavor = objects.Flavor(memory_mb=128,
root_gb=0,
name='m1.micro',
ephemeral_gb=0,
vcpus=1,
swap=0,
rxtx_factor=1.0,
flavorid='1',
vcpu_weight=None,
id=2)
self.test_instance['flavor'] = flavor
self.test_instance['old_flavor'] = None
self.test_instance['new_flavor'] = None
def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/sdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
'device_name': '/dev/sdd', 'size': 10}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
def _assert_volume_in_mapping(device_name, true_or_false):
self.assertEqual(
true_or_false,
block_device.volume_in_mapping(device_name,
block_device_info))
_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
_assert_volume_in_mapping('sdh1', False)
def test_find_disk_dev(self):
mapping = {
"disk.local": {
'dev': 'sda',
'bus': 'scsi',
'type': 'disk',
},
"disk.swap": {
'dev': 'sdc',
'bus': 'scsi',
'type': 'disk',
},
}
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
self.assertEqual('sdb', dev)
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
last_device=True)
self.assertEqual('sdz', dev)
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
self.assertEqual('vda', dev)
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc')
self.assertEqual('fda', dev)
def test_get_next_disk_dev(self):
mapping = {}
mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
'virtio')
self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'},
mapping['disk.local'])
mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
'virtio')
self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'},
mapping['disk.swap'])
mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
'ide',
'cdrom',
True)
self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'},
mapping['disk.config'])
def test_get_next_disk_dev_boot_index(self):
info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1)
self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info)
info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2)
self.assertEqual({'dev': 'vda', 'bus': 'virtio',
'type': 'disk', 'boot_index': '2'},
info)
def test_get_disk_mapping_simple(self):
# The simplest possible disk mapping setup, all defaults
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'}
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_simple_rootdev(self):
# A simple disk mapping setup, but with custom root device name
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'root_device_name': '/dev/sda'
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'disk': {'bus': 'scsi', 'dev': 'sda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
'root': {'bus': 'scsi', 'dev': 'sda',
'type': 'disk', 'boot_index': '1'}
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_rescue(self):
# A simple disk mapping setup, but in rescue mode
instance_ref = objects.Instance()
image_meta = {}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
rescue=True)
expect = {
'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_lxc(self):
# A simple disk mapping setup, but for lxc
self.test_instance['ephemeral_gb'] = 0
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
"lxc", "lxc",
image_meta)
expect = {
'disk': {'bus': 'lxc', 'dev': None,
'type': 'disk', 'boot_index': '1'},
'root': {'bus': 'lxc', 'dev': None,
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_simple_iso(self):
# A simple disk mapping setup, but with a ISO for root device
instance_ref = objects.Instance(**self.test_instance)
image_meta = {'disk_format': 'iso'}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta)
expect = {
'disk': {'bus': 'ide', 'dev': 'hda',
'type': 'cdrom', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
'root': {'bus': 'ide', 'dev': 'hda',
'type': 'cdrom', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_simple_swap(self):
# A simple disk mapping setup, but with a swap device added
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.swap = 5
image_meta = {}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_simple_configdrive(self):
# A simple disk mapping setup, but with configdrive added
# It's necessary to check if the architecture is power, because
# power doesn't support ide, so libvirt translates
# all ide calls to scsi
self.flags(force_config_drive=True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta)
# The last device is selected for this. On x86 it is the last ide
# device (hdd). Since power only supports scsi, the last device
# is sdz
bus_ppc = ("scsi", "sdz")
expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
("ide", "hdd"))
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'}
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_cdrom_configdrive(self):
# A simple disk mapping setup, with configdrive added as cdrom
# It's necessary to check if the architecture is power, because
# power doesn't support ide, so libvirt translates
# all ide calls to scsi
self.flags(force_config_drive=True)
self.flags(config_drive_format='iso9660')
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta)
bus_ppc = ("scsi", "sdz")
expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
("ide", "hdd"))
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'}
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_disk_configdrive(self):
# A simple disk mapping setup, with configdrive added as disk
self.flags(force_config_drive=True)
self.flags(config_drive_format='vfat')
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_ephemeral(self):
# A disk mapping with ephemeral devices
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.swap = 5
image_meta = {}
block_device_info = {
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
'device_name': '/dev/vdc', 'size': 10},
{'device_type': 'floppy',
'device_name': '/dev/vdd', 'size': 10},
]
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
'type': 'disk', 'format': 'ext4'},
'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_custom_swap(self):
# A disk mapping with a swap device at position vdb. This
# should cause disk.local to be removed
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'swap': {'device_name': '/dev/vdb',
'swap_size': 10},
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_blockdev_root(self):
# A disk mapping with a blockdev replacing the default root
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
'boot_index': 0,
'device_type': 'disk',
'delete_on_termination': True},
]
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'/dev/vda': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_blockdev_root_on_spawn(self):
# A disk mapping with a blockdev initializing the default root
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'block_device_mapping': [
{'connection_info': None,
'mount_device': None,
'boot_index': 0,
'device_type': None,
'delete_on_termination': True},
]
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'/dev/vda': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_blockdev_eph(self):
# A disk mapping with a blockdev replacing the ephemeral device
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vdb",
'boot_index': -1,
'delete_on_termination': True},
]
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_blockdev_many(self):
# A disk mapping with a blockdev replacing all devices
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
'boot_index': 0,
'disk_bus': 'scsi',
'delete_on_termination': True},
{'connection_info': "fake",
'mount_device': "/dev/vdb",
'boot_index': -1,
'delete_on_termination': True},
{'connection_info': "fake",
'mount_device': "/dev/vdc",
'boot_index': -1,
'device_type': 'cdrom',
'delete_on_termination': True},
]
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'/dev/vda': {'bus': 'scsi', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'},
'root': {'bus': 'scsi', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_complex(self):
# The strangest possible disk mapping setup
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'root_device_name': '/dev/vdf',
'swap': {'device_name': '/dev/vdy',
'swap_size': 10},
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
'device_name': '/dev/vdc', 'size': 10},
],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
'boot_index': 1,
'delete_on_termination': True},
]
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vdf',
'type': 'disk', 'boot_index': '1'},
'/dev/vda': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '2'},
'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
'type': 'disk', 'format': 'ext4'},
'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vdf',
'type': 'disk', 'boot_index': '1'},
}
self.assertEqual(expect, mapping)
def test_get_disk_mapping_updates_original(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {
'root_device_name': '/dev/vda',
'swap': {'device_name': '/dev/vdb',
'device_type': 'really_lame_type',
'swap_size': 10},
'ephemerals': [{'disk_bus': 'no_such_bus',
'device_type': 'yeah_right',
'device_name': '/dev/vdc', 'size': 10}],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': None,
'device_type': 'lawnmower',
'delete_on_termination': True}]
}
expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
'device_type': 'disk', 'swap_size': 10}
expected_ephemeral = {'disk_bus': 'virtio',
'device_type': 'disk',
'device_name': '/dev/vdc', 'size': 10}
expected_bdm = {'connection_info': "fake",
'mount_device': '/dev/vdd',
'device_type': 'disk',
'disk_bus': 'virtio',
'delete_on_termination': True}
blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
image_meta,
block_device_info)
self.assertEqual(expected_swap, block_device_info['swap'])
self.assertEqual(expected_ephemeral,
block_device_info['ephemerals'][0])
self.assertEqual(expected_bdm,
block_device_info['block_device_mapping'][0])
def test_get_disk_bus(self):
expected = (
(arch.X86_64, 'disk', 'virtio'),
(arch.X86_64, 'cdrom', 'ide'),
(arch.X86_64, 'floppy', 'fdc'),
(arch.PPC, 'disk', 'virtio'),
(arch.PPC, 'cdrom', 'scsi'),
(arch.PPC64, 'disk', 'virtio'),
(arch.PPC64, 'cdrom', 'scsi'),
(arch.S390, 'disk', 'virtio'),
(arch.S390, 'cdrom', 'scsi'),
(arch.S390X, 'disk', 'virtio'),
(arch.S390X, 'cdrom', 'scsi')
)
image_meta = {}
for guestarch, dev, res in expected:
with mock.patch.object(blockinfo.libvirt_utils,
'get_arch',
return_value=guestarch):
bus = blockinfo.get_disk_bus_for_device_type('kvm',
image_meta, dev)
self.assertEqual(res, bus)
expected = (
('scsi', None, 'disk', 'scsi'),
(None, 'scsi', 'cdrom', 'scsi'),
('usb', None, 'disk', 'usb')
)
for dbus, cbus, dev, res in expected:
image_meta = {'properties': {'hw_disk_bus': dbus,
'hw_cdrom_bus': cbus}}
bus = blockinfo.get_disk_bus_for_device_type('kvm',
image_meta,
device_type=dev)
self.assertEqual(res, bus)
image_meta = {'properties': {'hw_disk_bus': 'xen'}}
self.assertRaises(exception.UnsupportedHardware,
blockinfo.get_disk_bus_for_device_type,
'kvm',
image_meta)
def test_success_get_disk_bus_for_disk_dev(self):
expected = (
('ide', ("kvm", "hda")),
('scsi', ("kvm", "sdf")),
('virtio', ("kvm", "vds")),
('fdc', ("kvm", "fdc")),
('uml', ("kvm", "ubd")),
('xen', ("xen", "sdf")),
('xen', ("xen", "xvdb"))
)
for res, args in expected:
self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args))
def test_fail_get_disk_bus_for_disk_dev_unsupported_virt_type(self):
image_meta = {}
self.assertRaises(exception.UnsupportedVirtType,
blockinfo.get_disk_bus_for_device_type,
'kvm1',
image_meta)
def test_fail_get_disk_bus_for_disk_dev(self):
self.assertRaises(exception.NovaException,
blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val')
def test_get_config_drive_type_default(self):
config_drive_type = blockinfo.get_config_drive_type()
self.assertEqual('cdrom', config_drive_type)
def test_get_config_drive_type_cdrom(self):
self.flags(config_drive_format='iso9660')
config_drive_type = blockinfo.get_config_drive_type()
self.assertEqual('cdrom', config_drive_type)
def test_get_config_drive_type_disk(self):
self.flags(config_drive_format='vfat')
config_drive_type = blockinfo.get_config_drive_type()
self.assertEqual('disk', config_drive_type)
def test_get_info_from_bdm(self):
bdms = [{'device_name': '/dev/vds', 'device_type': 'disk',
'disk_bus': 'usb', 'swap_size': 4},
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 2},
{'disk_bus': 'ide', 'guest_format': None,
'device_name': '/dev/vdc', 'size': 3},
{'connection_info': "fake",
'mount_device': "/dev/sdr",
'disk_bus': 'lame_bus',
'device_type': 'cdrom',
'boot_index': 0,
'delete_on_termination': True},
{'connection_info': "fake",
'mount_device': "/dev/vdo",
'disk_bus': 'scsi',
'boot_index': 1,
'device_type': 'lame_type',
'delete_on_termination': True}]
expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
{'dev': 'vdb', 'type': 'disk',
'bus': 'virtio', 'format': 'ext4'},
{'dev': 'vdc', 'type': 'disk', 'bus': 'ide'},
{'dev': 'sdr', 'type': 'cdrom',
'bus': 'scsi', 'boot_index': '1'},
{'dev': 'vdo', 'type': 'disk',
'bus': 'scsi', 'boot_index': '2'}]
image_meta = {}
for bdm, expected in zip(bdms, expected):
self.assertEqual(expected,
blockinfo.get_info_from_bdm('kvm',
image_meta,
bdm))
# Test that passed bus and type are considered
bdm = {'device_name': '/dev/vda'}
expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'}
self.assertEqual(
expected, blockinfo.get_info_from_bdm('kvm',
image_meta,
bdm,
disk_bus='ide',
dev_type='disk'))
# Test that lame bus values are defaulted properly
bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'}
with mock.patch.object(blockinfo,
'get_disk_bus_for_device_type',
return_value='ide') as get_bus:
blockinfo.get_info_from_bdm('kvm',
image_meta,
bdm)
get_bus.assert_called_once_with('kvm', image_meta, 'cdrom')
# Test that missing device is defaulted as expected
bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'}
expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'}
mapping = {'root': {'dev': 'vda'}}
with mock.patch.object(blockinfo,
'find_disk_dev_for_disk_bus',
return_value='vdd') as find_dev:
got = blockinfo.get_info_from_bdm(
'kvm',
image_meta,
bdm,
mapping,
assigned_devices=['vdb', 'vdc'])
find_dev.assert_called_once_with(
{'root': {'dev': 'vda'},
'vdb': {'dev': 'vdb'},
'vdc': {'dev': 'vdc'}}, 'ide')
self.assertEqual(expected, got)
def test_get_device_name(self):
bdm_obj = objects.BlockDeviceMapping(self.context,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}))
self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj))
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm))
bdm_obj.device_name = None
self.assertIsNone(blockinfo.get_device_name(bdm_obj))
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
self.assertIsNone(blockinfo.get_device_name(driver_bdm))
@mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus',
return_value='vda')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev',
return_value='virtio')
def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev):
image_meta = {}
blockinfo.get_root_info('kvm', image_meta, None, 'virtio', 'ide')
mock_find_dev.assert_called_once_with({}, 'virtio')
blockinfo.get_root_info('kvm', image_meta, None, 'virtio', 'ide',
root_device_name='/dev/vda')
mock_get_bus.assert_called_once_with('kvm', '/dev/vda')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_get_root_info_bdm(self, mock_get_info):
image_meta = {}
root_bdm = {'mount_device': '/dev/vda',
'disk_bus': 'scsi',
'device_type': 'disk'}
# No root_device_name
blockinfo.get_root_info('kvm', image_meta, root_bdm, 'virtio', 'ide')
mock_get_info.assert_called_once_with('kvm', image_meta,
root_bdm, {}, 'virtio')
mock_get_info.reset_mock()
# Both device names
blockinfo.get_root_info('kvm', image_meta, root_bdm, 'virtio', 'ide',
root_device_name='sda')
mock_get_info.assert_called_once_with('kvm', image_meta,
root_bdm, {}, 'virtio')
mock_get_info.reset_mock()
# Missing device names
del root_bdm['mount_device']
blockinfo.get_root_info('kvm', image_meta, root_bdm, 'virtio', 'ide',
root_device_name='sda')
mock_get_info.assert_called_once_with('kvm',
image_meta,
{'device_name': 'sda',
'disk_bus': 'scsi',
'device_type': 'disk'},
{}, 'virtio')
def test_get_boot_order_simple(self):
disk_info = {
'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
}
expected_order = ['hd']
self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
def test_get_boot_order_complex(self):
disk_info = {
'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {
'disk': {'bus': 'virtio', 'dev': 'vdf',
'type': 'disk', 'boot_index': '1'},
'/dev/hda': {'bus': 'ide', 'dev': 'hda',
'type': 'cdrom', 'boot_index': '3'},
'/dev/fda': {'bus': 'fdc', 'dev': 'fda',
'type': 'floppy', 'boot_index': '2'},
'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
'type': 'disk', 'format': 'ext4'},
'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
'root': {'bus': 'virtio', 'dev': 'vdf',
'type': 'disk', 'boot_index': '1'},
}
}
expected_order = ['hd', 'fd', 'cdrom']
self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
def test_get_boot_order_overlapping(self):
disk_info = {
'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {
'/dev/vda': {'bus': 'scsi', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'/dev/vdb': {'bus': 'virtio', 'dev': 'vdb',
'type': 'disk', 'boot_index': '2'},
'/dev/vdc': {'bus': 'virtio', 'dev': 'vdc',
'type': 'cdrom', 'boot_index': '3'},
'root': {'bus': 'scsi', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
}
}
expected_order = ['hd', 'cdrom']
self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
class DefaultDeviceNamesTestCase(test.NoDBTestCase):
def setUp(self):
super(DefaultDeviceNamesTestCase, self).setUp()
self.context = context.get_admin_context()
self.instance = objects.Instance(
uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
memory_kb='1024000',
basepath='/some/path',
bridge_name='br100',
vcpus=2,
project_id='fake',
bridge='br101',
image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
root_gb=10,
ephemeral_gb=20,
instance_type_id=2,
config_drive=False,
root_device_name = '/dev/vda',
system_metadata={})
self.virt_type = 'kvm'
self.flavor = objects.Flavor(swap=4)
self.patchers = []
self.patchers.append(mock.patch.object(self.instance, 'get_flavor',
return_value=self.flavor))
self.patchers.append(mock.patch(
'nova.objects.block_device.BlockDeviceMapping.save'))
for patcher in self.patchers:
patcher.start()
self.ephemerals = [objects.BlockDeviceMapping(
self.context, **fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'disk_bus': 'virtio',
'delete_on_termination': True,
'guest_format': None,
'volume_size': 1,
'boot_index': -1}))]
self.swap = [objects.BlockDeviceMapping(
self.context, **fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdc',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'disk_bus': 'virtio',
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': 1,
'boot_index': -1}))]
self.block_device_mapping = [
objects.BlockDeviceMapping(self.context,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'device_type': 'disk',
'disk_bus': 'virtio',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})),
objects.BlockDeviceMapping(self.context,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdd',
'source_type': 'snapshot',
'device_type': 'disk',
'disk_bus': 'virtio',
'destination_type': 'volume',
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1})),
objects.BlockDeviceMapping(self.context,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vde',
'source_type': 'blank',
'device_type': 'disk',
'disk_bus': 'virtio',
'destination_type': 'volume',
'boot_index': -1}))]
def tearDown(self):
super(DefaultDeviceNamesTestCase, self).tearDown()
for patcher in self.patchers:
patcher.stop()
def _test_default_device_names(self, eph, swap, bdm):
image_meta = {}
bdms = eph + swap + bdm
bdi = driver.get_block_device_info(self.instance, bdms)
blockinfo.default_device_names(self.virt_type,
self.context,
self.instance,
bdi,
image_meta)
def test_only_block_device_mapping(self):
# Test no-op
original_bdm = copy.deepcopy(self.block_device_mapping)
self._test_default_device_names([], [], self.block_device_mapping)
for original, defaulted in zip(
original_bdm, self.block_device_mapping):
self.assertEqual(original.device_name, defaulted.device_name)
# Assert it defaults the missing one as expected
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], [], self.block_device_mapping)
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vde',
self.block_device_mapping[2]['device_name'])
def test_with_ephemerals(self):
# Test ephemeral gets assigned
self.ephemerals[0]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vde',
self.block_device_mapping[2]['device_name'])
def test_with_swap(self):
# Test swap only
self.swap[0]['device_name'] = None
self._test_default_device_names([], self.swap, [])
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
# Test swap and block_device_mapping
self.swap[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], self.swap,
self.block_device_mapping)
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vde',
self.block_device_mapping[2]['device_name'])
def test_all_together(self):
# Test swap missing
self.swap[0]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
# Test swap and eph missing
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
# Test all missing
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vde',
self.block_device_mapping[2]['device_name'])
|
apache-2.0
|
a-doumoulakis/tensorflow
|
tensorflow/contrib/saved_model/python/saved_model/utils_test.py
|
29
|
4516
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saved_model utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.saved_model.python.saved_model import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
class UtilsTest(test.TestCase):
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
sess.run(variables.global_variables_initializer())
self.assertEqual(variable_value, v.eval())
return v
def _check_variable_info(self, actual_variable, expected_variable):
self.assertEqual(actual_variable.name, expected_variable.name)
self.assertEqual(actual_variable.dtype, expected_variable.dtype)
self.assertEqual(len(actual_variable.shape), len(expected_variable.shape))
for i in range(len(actual_variable.shape)):
self.assertEqual(actual_variable.shape[i], expected_variable.shape[i])
def _check_tensor_info(self, actual_tensor_info, expected_tensor):
self.assertEqual(actual_tensor_info.name, expected_tensor.name)
self.assertEqual(actual_tensor_info.dtype, expected_tensor.dtype)
self.assertEqual(
len(actual_tensor_info.tensor_shape.dim), len(expected_tensor.shape))
for i in range(len(actual_tensor_info.tensor_shape.dim)):
self.assertEqual(actual_tensor_info.tensor_shape.dim[i].size,
expected_tensor.shape[i])
def testSimpleSave(self):
"""Test simple_save that uses the default parameters."""
export_dir = os.path.join(test.get_temp_dir(),
"test_simple_save")
# Initialize input and output variables and save a prediction graph using
# the default parameters.
with self.test_session(graph=ops.Graph()) as sess:
var_x = self._init_and_validate_variable(sess, "var_x", 1)
var_y = self._init_and_validate_variable(sess, "var_y", 2)
inputs = {"x": var_x}
outputs = {"y": var_y}
utils.simple_save(sess, export_dir, inputs, outputs)
# Restore the graph with a valid tag and check the global variables and
# signature def map.
with self.test_session(graph=ops.Graph()) as sess:
graph = loader.load(sess, [tag_constants.SERVING], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Check value and metadata of the saved variables.
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
self._check_variable_info(collection_vars[0], var_x)
self._check_variable_info(collection_vars[1], var_y)
# Check that the appropriate signature_def_map is created with the
# default key and method name, and the specified inputs and outputs.
signature_def_map = graph.signature_def
self.assertEqual(1, len(signature_def_map))
self.assertEqual(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
list(signature_def_map.keys())[0])
signature_def = signature_def_map[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.assertEqual(signature_constants.PREDICT_METHOD_NAME,
signature_def.method_name)
self.assertEqual(1, len(signature_def.inputs))
self._check_tensor_info(signature_def.inputs["x"], var_x)
self.assertEqual(1, len(signature_def.outputs))
self._check_tensor_info(signature_def.outputs["y"], var_y)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
imply/chuu
|
native_client_sdk/src/tools/tests/create_nmf_test.py
|
53
|
7804
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
DATA_DIR = os.path.join(SCRIPT_DIR, 'data')
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(PARENT_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, "third_party", "pymock")
# For the mock library
sys.path.append(MOCK_DIR)
sys.path.append(PARENT_DIR)
import create_nmf
import getos
import mock
class TestIsDynamicElf(unittest.TestCase):
def test_arm(self):
static_nexe = os.path.join(DATA_DIR, 'test_static_arm.nexe')
self.assertFalse(create_nmf.IsDynamicElf(static_nexe, False))
def test_x86_32(self):
dyn_nexe = os.path.join(DATA_DIR, 'test_dynamic_x86_32.nexe')
static_nexe = os.path.join(DATA_DIR, 'test_static_x86_32.nexe')
self.assertTrue(create_nmf.IsDynamicElf(dyn_nexe, False))
self.assertFalse(create_nmf.IsDynamicElf(static_nexe, False))
def test_x86_64(self):
dyn_nexe = os.path.join(DATA_DIR, 'test_dynamic_x86_64.nexe')
static_nexe = os.path.join(DATA_DIR, 'test_static_x86_64.nexe')
self.assertTrue(create_nmf.IsDynamicElf(dyn_nexe, True))
self.assertFalse(create_nmf.IsDynamicElf(static_nexe, True))
class TestParseElfHeader(unittest.TestCase):
def test_invalid_elf(self):
self.assertRaises(create_nmf.Error, create_nmf.ParseElfHeader, __file__)
def test_arm_elf_parse(self):
"""Test parsing of ARM elf header."""
static_nexe = os.path.join(DATA_DIR, 'test_static_arm.nexe')
arch, dynamic = create_nmf.ParseElfHeader(static_nexe)
self.assertEqual(arch, 'arm')
self.assertFalse(dynamic)
def test_x86_32_elf_parse(self):
"""Test parsing of x86-32 elf header."""
dyn_nexe = os.path.join(DATA_DIR, 'test_dynamic_x86_32.nexe')
static_nexe = os.path.join(DATA_DIR, 'test_static_x86_32.nexe')
arch, dynamic = create_nmf.ParseElfHeader(dyn_nexe)
self.assertEqual(arch, 'x86-32')
self.assertTrue(dynamic)
arch, dynamic = create_nmf.ParseElfHeader(static_nexe)
self.assertEqual(arch, 'x86-32')
self.assertFalse(dynamic)
def test_x86_64_elf_parse(self):
"""Test parsing of x86-64 elf header."""
dyn_nexe = os.path.join(DATA_DIR, 'test_dynamic_x86_64.nexe')
static_nexe = os.path.join(DATA_DIR, 'test_static_x86_64.nexe')
arch, dynamic = create_nmf.ParseElfHeader(dyn_nexe)
self.assertEqual(arch, 'x86-64')
self.assertTrue(dynamic)
arch, dynamic = create_nmf.ParseElfHeader(static_nexe)
self.assertEqual(arch, 'x86-64')
self.assertFalse(dynamic)
class TestDefaultLibpath(unittest.TestCase):
def testWithoutNaClSDKRoot(self):
"""GetDefaultLibPath wihtout NACL_SDK_ROOT set
In the absence of NACL_SDK_ROOT GetDefaultLibPath should
return the empty list."""
with mock.patch.dict('os.environ', clear=True):
paths = create_nmf.GetDefaultLibPath('Debug')
self.assertEqual(paths, [])
def testHonorNaClSDKRoot(self):
with mock.patch.dict('os.environ', {'NACL_SDK_ROOT': '/dummy/path'}):
paths = create_nmf.GetDefaultLibPath('Debug')
for path in paths:
self.assertTrue(path.startswith('/dummy/path'))
def testIncludesNaClPorts(self):
with mock.patch.dict('os.environ', {'NACL_SDK_ROOT': '/dummy/path'}):
paths = create_nmf.GetDefaultLibPath('Debug')
self.assertTrue(any(os.path.join('ports', 'lib') in p for p in paths),
"naclports libpath missing: %s" % str(paths))
class TestNmfUtils(unittest.TestCase):
"""Tests for the main NmfUtils class in create_nmf."""
def setUp(self):
self.tempdir = None
toolchain = os.path.join(CHROME_SRC, 'native_client', 'toolchain')
self.toolchain = os.path.join(toolchain, '%s_x86' % getos.GetPlatform())
self.objdump = os.path.join(self.toolchain, 'bin', 'i686-nacl-objdump')
if os.name == 'nt':
self.objdump += '.exe'
self.Mktemp()
self.dyn_nexe = self.createTestNexe('test_dynamic_x86_32.nexe', True,
'i686')
self.dyn_deps = set(['libc.so', 'runnable-ld.so',
'libgcc_s.so', 'libpthread.so'])
def createTestNexe(self, name, dynamic, arch):
"""Create an empty test .nexe file for use in create_nmf tests.
This is used rather than checking in test binaries since the
checked-in binaries depend on .so files that only exist in the
particular SDK that built them.
"""
compiler = os.path.join(self.toolchain, 'bin', '%s-nacl-g++' % arch)
if os.name == 'nt':
compiler += '.exe'
os.environ['CYGWIN'] = 'nodosfilewarning'
program = 'int main() { return 0; }'
name = os.path.join(self.tempdir, name)
cmd = [compiler, '-pthread', '-x' , 'c', '-o', name, '-']
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.communicate(input=program)
self.assertEqual(p.returncode, 0)
return name
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
def Mktemp(self):
self.tempdir = tempfile.mkdtemp()
def CreateNmfUtils(self, libdir=None):
if not libdir:
libdir = os.path.join(self.toolchain, 'x86_64-nacl', 'lib32')
return create_nmf.NmfUtils([self.dyn_nexe],
lib_path=[libdir],
objdump=self.objdump)
def testGetNeededStatic(self):
nexe = os.path.join(DATA_DIR, 'test_static_x86_32.nexe')
nmf = create_nmf.NmfUtils([nexe])
needed = nmf.GetNeeded()
# static nexe should have exactly one needed file
self.assertEqual(len(needed), 1)
self.assertEqual(needed.keys()[0], nexe)
# arch of needed file should be x86-32
archfile = needed.values()[0]
self.assertEqual(archfile.arch, 'x86-32')
def StripDependencies(self, deps):
"""Strip the dirnames and version suffixes from
a list of nexe dependencies.
e.g.:
/path/to/libpthread.so.1a2d3fsa -> libpthread.so
"""
names = []
for name in deps:
name = os.path.basename(name)
if '.so.' in name:
name = name.rsplit('.', 1)[0]
names.append(name)
return names
def testGetNeededDynamic(self):
nmf = self.CreateNmfUtils()
needed = nmf.GetNeeded()
names = needed.keys()
# this nexe has 5 dependencies
expected = set(self.dyn_deps)
expected.add(os.path.basename(self.dyn_nexe))
basenames = set(self.StripDependencies(names))
self.assertEqual(expected, basenames)
def testStageDependencies(self):
self.Mktemp()
nmf = self.CreateNmfUtils()
#create_nmf.DebugPrint.debug_mode = True
#create_nmf.Trace.verbose = True
# Stage dependencies
nmf.StageDependencies(self.tempdir)
# Verify directory contents
contents = set(os.listdir(self.tempdir))
expectedContents = set((os.path.basename(self.dyn_nexe), 'lib32'))
self.assertEqual(contents, expectedContents)
contents = os.listdir(os.path.join(self.tempdir, 'lib32'))
contents = self.StripDependencies(contents)
contents = set(contents)
expectedContents = self.dyn_deps
self.assertEqual(contents, expectedContents)
def testMissingArchLibrary(self):
self.Mktemp()
nmf = self.CreateNmfUtils()
# CreateNmfUtils uses the 32-bit library path, but not the 64-bit one
# so searching for a 32-bit library should succeed while searching for
# a 64-bit one should fail.
nmf.GleanFromObjdump(['libgcc_s.so.1'], 'x86-32')
self.assertRaises(create_nmf.Error,
nmf.GleanFromObjdump, ['libgcc_s.so.1'], 'x86-64')
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
Hani-K/H-Vitamin_trlte
|
tools/perf/scripts/python/event_analyzing_sample.py
|
4719
|
7393
|
# event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# to provide end users/developers a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a large number of samples, the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to a RAM-based FS to speed up
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start: pebs_ll is for PEBS data with
# load latency info, while gen_events is for general events.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event objects into the database so that users can
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the number of events may be very large, we can't use a linear way
# to show the histogram in real numbers, so we use a log2 algorithm.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
|
gpl-2.0
|
rishibarve/incubator-airflow
|
airflow/contrib/hooks/databricks_hook.py
|
22
|
7622
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import requests
from airflow import __version__
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from requests import exceptions as requests_exceptions
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
SUBMIT_RUN_ENDPOINT = ('POST', 'api/2.0/jobs/runs/submit')
GET_RUN_ENDPOINT = ('GET', 'api/2.0/jobs/runs/get')
CANCEL_RUN_ENDPOINT = ('POST', 'api/2.0/jobs/runs/cancel')
USER_AGENT_HEADER = {'user-agent': 'airflow-{v}'.format(v=__version__)}
class DatabricksHook(BaseHook):
"""
Interact with Databricks.
"""
def __init__(
self,
databricks_conn_id='databricks_default',
timeout_seconds=180,
retry_limit=3):
"""
:param databricks_conn_id: The name of the databricks connection to use.
:type databricks_conn_id: string
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:type timeout_seconds: int
:param retry_limit: The number of times to retry the connection in case of
service outages.
:type retry_limit: int
"""
self.databricks_conn_id = databricks_conn_id
self.databricks_conn = self.get_connection(databricks_conn_id)
self.timeout_seconds = timeout_seconds
        assert retry_limit >= 1, 'Retry limit must be greater than or equal to 1'
self.retry_limit = retry_limit
def _parse_host(self, host):
"""
        The purpose of this function is to be robust to improper connection
settings provided by users, specifically in the host field.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.
>>> h = DatabricksHook()
>>> assert h._parse_host('https://xx.cloud.databricks.com') == \
'xx.cloud.databricks.com'
In the case where users supply the correct ``xx.cloud.databricks.com`` as the
host, this function is a no-op.
>>> assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
"""
urlparse_host = urlparse.urlparse(host).hostname
if urlparse_host:
# In this case, host = https://xx.cloud.databricks.com
return urlparse_host
else:
# In this case, host = xx.cloud.databricks.com
return host
def _do_api_call(self, endpoint_info, json):
"""
Utility function to perform an API call with retries
:param endpoint_info: Tuple of method and endpoint
:type endpoint_info: (string, string)
:param json: Parameters for this API call.
:type json: dict
        :return: If the API call returns an OK status code,
this function returns the response in JSON. Otherwise,
we throw an AirflowException.
:rtype: dict
"""
method, endpoint = endpoint_info
url = 'https://{host}/{endpoint}'.format(
host=self._parse_host(self.databricks_conn.host),
endpoint=endpoint)
auth = (self.databricks_conn.login, self.databricks_conn.password)
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
for attempt_num in range(1, self.retry_limit+1):
try:
response = request_func(
url,
json=json,
auth=auth,
headers=USER_AGENT_HEADER,
timeout=self.timeout_seconds)
if response.status_code == requests.codes.ok:
return response.json()
else:
# In this case, the user probably made a mistake.
# Don't retry.
raise AirflowException('Response: {0}, Status Code: {1}'.format(
response.content, response.status_code))
except (requests_exceptions.ConnectionError,
requests_exceptions.Timeout) as e:
logging.error(('Attempt {0} API Request to Databricks failed ' +
'with reason: {1}').format(attempt_num, e))
raise AirflowException(('API requests to Databricks failed {} times. ' +
'Giving up.').format(self.retry_limit))
def submit_run(self, json):
"""
Utility function to call the ``api/2.0/jobs/runs/submit`` endpoint.
:param json: The data used in the body of the request to the ``submit`` endpoint.
:type json: dict
:return: the run_id as a string
:rtype: string
"""
response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
return response['run_id']
def get_run_page_url(self, run_id):
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response['run_page_url']
def get_run_state(self, run_id):
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
state = response['state']
life_cycle_state = state['life_cycle_state']
# result_state may not be in the state if not terminal
result_state = state.get('result_state', None)
state_message = state['state_message']
return RunState(life_cycle_state, result_state, state_message)
def cancel_run(self, run_id):
json = {'run_id': run_id}
self._do_api_call(CANCEL_RUN_ENDPOINT, json)
RUN_LIFE_CYCLE_STATES = [
'PENDING',
'RUNNING',
'TERMINATING',
'TERMINATED',
'SKIPPED',
'INTERNAL_ERROR'
]
class RunState:
"""
Utility class for the run state concept of Databricks runs.
"""
def __init__(self, life_cycle_state, result_state, state_message):
self.life_cycle_state = life_cycle_state
self.result_state = result_state
self.state_message = state_message
@property
def is_terminal(self):
if self.life_cycle_state not in RUN_LIFE_CYCLE_STATES:
raise AirflowException(('Unexpected life cycle state: {}: If the state has '
'been introduced recently, please check the Databricks user '
'guide for troubleshooting information').format(
self.life_cycle_state))
return self.life_cycle_state in ('TERMINATED', 'SKIPPED', 'INTERNAL_ERROR')
@property
def is_successful(self):
return self.result_state == 'SUCCESS'
def __eq__(self, other):
return self.life_cycle_state == other.life_cycle_state and \
self.result_state == other.result_state and \
self.state_message == other.state_message
def __repr__(self):
return str(self.__dict__)
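# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the hook itself). It assumes an Airflow
# connection named 'databricks_default' exists with host/login/password set;
# the cluster spec and notebook path below are placeholders that would need
# to match a real workspace.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    hook = DatabricksHook(databricks_conn_id='databricks_default')
    run_id = hook.submit_run({
        'new_cluster': {
            'spark_version': '2.1.0-db3-scala2.11',
            'node_type_id': 'r3.xlarge',
            'num_workers': 1,
        },
        'notebook_task': {'notebook_path': '/Users/someone@example.com/demo'},
    })
    state = hook.get_run_state(run_id)
    logging.info('Run %s terminal: %s, successful: %s',
                 run_id, state.is_terminal, state.is_successful)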
|
apache-2.0
|
j-carpentier/nova
|
nova/db/sqlalchemy/api_models.py
|
43
|
2611
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import models
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import schema
from sqlalchemy import String
from sqlalchemy import Text
class _NovaAPIBase(models.ModelBase, models.TimestampMixin):
pass
API_BASE = declarative_base(cls=_NovaAPIBase)
class CellMapping(API_BASE):
"""Contains information on communicating with a cell"""
__tablename__ = 'cell_mappings'
__table_args__ = (Index('uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_cell_mappings0uuid'))
id = Column(Integer, primary_key=True)
uuid = Column(String(36), nullable=False)
name = Column(String(255))
transport_url = Column(Text())
database_connection = Column(Text())
class InstanceMapping(API_BASE):
"""Contains the mapping of an instance to which cell it is in"""
__tablename__ = 'instance_mappings'
__table_args__ = (Index('project_id_idx', 'project_id'),
Index('instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_instance_mappings0instance_uuid'))
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=False)
project_id = Column(String(255), nullable=False)
class HostMapping(API_BASE):
"""Contains mapping of a compute host to which cell it is in"""
__tablename__ = "host_mappings"
__table_args__ = (Index('host_idx', 'host'),
schema.UniqueConstraint('host',
name='uniq_host_mappings0host'))
id = Column(Integer, primary_key=True)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=False)
host = Column(String(255), nullable=False)
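# ---------------------------------------------------------------------------
# Minimal sketch (not part of Nova) showing how these declarative models fit
# together: create the tables in an in-memory SQLite database and add one
# cell mapping plus a host mapping that points at it. The UUID and host name
# are placeholders; SQLAlchemy and oslo.db (imported above) are required.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    API_BASE.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    cell = CellMapping(uuid='00000000-0000-0000-0000-000000000001',
                       name='cell1',
                       transport_url='rabbit://example/',
                       database_connection='sqlite://')
    session.add(cell)
    session.flush()  # assigns cell.id so it can be referenced below
    session.add(HostMapping(cell_id=cell.id, host='compute1'))
    session.commit()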
|
apache-2.0
|
garvitr/sympy
|
sympy/printing/dot.py
|
103
|
6644
|
from __future__ import print_function, division
from sympy import (Basic, Expr, Symbol, Integer, Rational, Float,
default_sort_key, Add, Mul)
__all__ = ['dotprint']
default_styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),
(Expr, {'color': 'black'})]
sort_classes = (Add, Mul)
slotClasses = (Symbol, Integer, Rational, Float)
# XXX: Why not just use srepr()?
def purestr(x):
""" A string that follows obj = type(obj)(*obj.args) exactly """
if not isinstance(x, Basic):
return str(x)
if type(x) in slotClasses:
args = [getattr(x, slot) for slot in x.__slots__]
elif type(x) in sort_classes:
args = sorted(x.args, key=default_sort_key)
else:
args = x.args
return "%s(%s)"%(type(x).__name__, ', '.join(map(purestr, args)))
def styleof(expr, styles=default_styles):
""" Merge style dictionaries in order
>>> from sympy import Symbol, Basic, Expr
>>> from sympy.printing.dot import styleof
>>> styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),
... (Expr, {'color': 'black'})]
>>> styleof(Basic(1), styles)
{'color': 'blue', 'shape': 'ellipse'}
>>> x = Symbol('x')
>>> styleof(x + 1, styles) # this is an Expr
{'color': 'black', 'shape': 'ellipse'}
"""
style = dict()
for typ, sty in styles:
if isinstance(expr, typ):
style.update(sty)
return style
def attrprint(d, delimiter=', '):
""" Print a dictionary of attributes
>>> from sympy.printing.dot import attrprint
>>> print(attrprint({'color': 'blue', 'shape': 'ellipse'}))
"color"="blue", "shape"="ellipse"
"""
return delimiter.join('"%s"="%s"'%item for item in sorted(d.items()))
def dotnode(expr, styles=default_styles, labelfunc=str, pos=(), repeat=True):
""" String defining a node
>>> from sympy.printing.dot import dotnode
>>> from sympy.abc import x
>>> print(dotnode(x))
"Symbol(x)_()" ["color"="black", "label"="x", "shape"="ellipse"];
"""
style = styleof(expr, styles)
if isinstance(expr, Basic) and not expr.is_Atom:
label = str(expr.__class__.__name__)
else:
label = labelfunc(expr)
style['label'] = label
expr_str = purestr(expr)
if repeat:
expr_str += '_%s' % str(pos)
return '"%s" [%s];' % (expr_str, attrprint(style))
def dotedges(expr, atom=lambda x: not isinstance(x, Basic), pos=(), repeat=True):
""" List of strings for all expr->expr.arg pairs
See the docstring of dotprint for explanations of the options.
>>> from sympy.printing.dot import dotedges
>>> from sympy.abc import x
>>> for e in dotedges(x+2):
... print(e)
"Add(Integer(2), Symbol(x))_()" -> "Integer(2)_(0,)";
"Add(Integer(2), Symbol(x))_()" -> "Symbol(x)_(1,)";
"""
if atom(expr):
return []
else:
# TODO: This is quadratic in complexity (purestr(expr) already
# contains [purestr(arg) for arg in expr.args]).
expr_str = purestr(expr)
arg_strs = [purestr(arg) for arg in expr.args]
if repeat:
expr_str += '_%s' % str(pos)
arg_strs = [arg_str + '_%s' % str(pos + (i,)) for i, arg_str in enumerate(arg_strs)]
return ['"%s" -> "%s";'%(expr_str, arg_str) for arg_str in arg_strs]
template = \
"""digraph{
# Graph style
%(graphstyle)s
#########
# Nodes #
#########
%(nodes)s
#########
# Edges #
#########
%(edges)s
}"""
graphstyle = {'rankdir': 'TD', 'ordering': 'out'}
def dotprint(expr, styles=default_styles, atom=lambda x: not isinstance(x,
Basic), maxdepth=None, repeat=True, labelfunc=str, **kwargs):
"""
DOT description of a SymPy expression tree
Options are
``styles``: Styles for different classes. The default is::
[(Basic, {'color': 'blue', 'shape': 'ellipse'}),
(Expr, {'color': 'black'})]``
``atom``: Function used to determine if an arg is an atom. The default is
``lambda x: not isinstance(x, Basic)``. Another good choice is
``lambda x: not x.args``.
``maxdepth``: The maximum depth. The default is None, meaning no limit.
    ``repeat``: Whether to use different nodes for common subexpressions.
The default is True. For example, for ``x + x*y`` with
``repeat=True``, it will have two nodes for ``x`` and with
``repeat=False``, it will have one (warning: even if it appears
        twice in the same object, like Pow(x, x), it will still appear
only once. Hence, with repeat=False, the number of arrows out of an
object might not equal the number of args it has).
``labelfunc``: How to label leaf nodes. The default is ``str``. Another
good option is ``srepr``. For example with ``str``, the leaf nodes
of ``x + 1`` are labeled, ``x`` and ``1``. With ``srepr``, they
are labeled ``Symbol('x')`` and ``Integer(1)``.
Additional keyword arguments are included as styles for the graph.
Examples
========
>>> from sympy.printing.dot import dotprint
>>> from sympy.abc import x
>>> print(dotprint(x+2)) # doctest: +NORMALIZE_WHITESPACE
digraph{
<BLANKLINE>
# Graph style
"ordering"="out"
"rankdir"="TD"
<BLANKLINE>
#########
# Nodes #
#########
<BLANKLINE>
"Add(Integer(2), Symbol(x))_()" ["color"="black", "label"="Add", "shape"="ellipse"];
"Integer(2)_(0,)" ["color"="black", "label"="2", "shape"="ellipse"];
"Symbol(x)_(1,)" ["color"="black", "label"="x", "shape"="ellipse"];
<BLANKLINE>
#########
# Edges #
#########
<BLANKLINE>
"Add(Integer(2), Symbol(x))_()" -> "Integer(2)_(0,)";
"Add(Integer(2), Symbol(x))_()" -> "Symbol(x)_(1,)";
}
"""
# repeat works by adding a signature tuple to the end of each node for its
# position in the graph. For example, for expr = Add(x, Pow(x, 2)), the x in the
# Pow will have the tuple (1, 0), meaning it is expr.args[1].args[0].
graphstyle.update(kwargs)
nodes = []
edges = []
def traverse(e, depth, pos=()):
nodes.append(dotnode(e, styles, labelfunc=labelfunc, pos=pos, repeat=repeat))
if maxdepth and depth >= maxdepth:
return
edges.extend(dotedges(e, atom=atom, pos=pos, repeat=repeat))
[traverse(arg, depth+1, pos + (i,)) for i, arg in enumerate(e.args) if not atom(arg)]
traverse(expr, 0)
return template%{'graphstyle': attrprint(graphstyle, delimiter='\n'),
'nodes': '\n'.join(nodes),
'edges': '\n'.join(edges)}
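# ---------------------------------------------------------------------------
# Small usage sketch (not part of the module): write the DOT description of
# an expression tree to a file. Rendering it to an image assumes the
# Graphviz ``dot`` executable is available, which is outside SymPy itself.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from sympy import srepr
    from sympy.abc import x
    with open('expr.dot', 'w') as handle:
        handle.write(dotprint(x + 2, labelfunc=srepr))
    # Then, outside Python:  dot -Tpng expr.dot -o expr.png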
|
bsd-3-clause
|
phev8/dataset_tools
|
experiment_handler/check_signal_availabilities.py
|
1
|
1790
|
from experiment_handler.imu_data_reader import get_imu_data
from experiment_handler.finder import find_all_imu_files
import os
import matplotlib.pyplot as plt
def plot_imu_signals_for_experiment(exp_root, figsize=(10, 10), reference_time="video"):
imu_files = find_all_imu_files(exp_root)
for imu_file_path in imu_files:
source = os.path.basename(imu_file_path).split('.')[0]
data = get_imu_data(exp_root, source, start=None, end=None, reference_time=reference_time, convert_time=True)
f, axarr = plt.subplots(4, sharex=True, figsize=figsize)
index = 1
axarr[0].plot(data[:, 0], data[:, index])
axarr[0].plot(data[:, 0], data[:, index + 1])
axarr[0].plot(data[:, 0], data[:, index + 2])
axarr[0].grid()
axarr[0].set_title(source + ' - Acceleration')
index = 4
axarr[1].plot(data[:, 0], data[:, index])
axarr[1].plot(data[:, 0], data[:, index + 1])
axarr[1].plot(data[:, 0], data[:, index + 2])
axarr[1].grid()
axarr[1].set_title(source + ' - Gyro')
index = 7
axarr[2].plot(data[:, 0], data[:, index])
axarr[2].plot(data[:, 0], data[:, index + 1])
axarr[2].plot(data[:, 0], data[:, index + 2])
axarr[2].grid()
axarr[2].set_title(source + ' - Magnetic')
index = 10
axarr[3].plot(data[:, 0], data[:, index])
axarr[3].plot(data[:, 0], data[:, index + 1])
axarr[3].plot(data[:, 0], data[:, index + 2])
axarr[3].grid()
axarr[3].set_title(source + ' - Euler angles')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
plot_imu_signals_for_experiment(exp_root)
|
mit
|
praekelt/sideloader2
|
sideloader.build/sideloader/build/sideloader.py
|
2
|
19837
|
import os
import shutil
import yaml
from collections import namedtuple
from urlparse import urlparse
import deploy_types
from config_files import ConfigFiles
from utils import cmd, create_venv_paths, listdir_abs, log, rmtree_if_exists
class Workspace(object):
"""
Keeps track of the various file paths in the workspace and downloads the
source from github.
"""
debug = False
_cmd = lambda self, *args, **kwargs: cmd(*args, debug=self.debug, **kwargs)
def __init__(self, workspace_id, workspace_base, install_location, repo):
self.install_location = install_location
self.repo = repo
self._init_paths(workspace_id, workspace_base, install_location)
def _init_paths(self, workspace_id, workspace_base, install_location):
""" Initialise the paths to various directories in the workspace. """
self._dir = os.path.abspath(os.path.join(workspace_base, workspace_id))
package_path = os.path.join(self._dir, 'package')
dirs = {
'repo': os.path.join(self._dir, self.repo.name),
'build': os.path.join(self._dir, 'build'),
'package': package_path,
'install': os.path.join(package_path,
install_location.lstrip('/'))
}
self._dirs = namedtuple('WorkspaceDirs', dirs.keys())(**dirs)
def set_up(self):
"""
Create the workspace and fetch the repo.
"""
self.create_clean_workspace()
self.fetch_repo()
def create_clean_workspace(self):
"""
Create the workspace directory if it doesn't exist or clean it out.
"""
if os.path.exists(self._dir):
self.clean_workspace()
else:
os.makedirs(self._dir)
def clean_workspace(self):
""" Clean up the workspace directory (but not the virtualenv). """
for path in self._dirs:
rmtree_if_exists(path)
def fetch_repo(self):
""" Clone the repo and checkout the desired branch. """
log('Fetching github repo')
self._cmd(['git', 'clone', self.repo.url, self._dirs.repo])
self._cmd(['git', '-C', self._dirs.repo, 'checkout', self.repo.branch])
def load_deploy(self, deploy_file='.deploy.yaml'):
"""
Load the .deploy.yaml file in the repo or fallback to the default
settings if one could not be found. Merge any overrides.
"""
if not os.path.exists(self._dirs.repo):
log('WARNING: Repo directory not found. Has it been fetched yet?')
deploy_file_path = self.get_repo_path(deploy_file)
if os.path.exists(deploy_file_path):
return Deploy.from_deploy_file(deploy_file_path)
else:
log('No deploy file found, continuing with defaults')
return Deploy()
def make_build_dir(self):
"""
Create the build directory (the workspace directory should already
exist).
"""
os.mkdir(self._dirs.build)
def make_package_dir(self):
"""
Create the package directory (the workspace directory should already
exist).
"""
os.mkdir(self._dirs.package)
def make_install_dir(self):
"""
Create the install directory (the package directory should already
exist).
"""
package_relpath = os.path.relpath(self._dirs.install,
self._dirs.package)
parts = package_relpath.split(os.sep)
subpath = self._dirs.package
for part in parts:
subpath = os.path.join(subpath, part)
os.mkdir(subpath)
def get_path(self, *paths):
""" Get a path within the workspace directory. """
return os.path.join(self._dir, *paths)
def get_package_path(self, *paths):
""" Get a path within the package directory. """
return os.path.join(self._dirs.package, *paths)
def get_build_path(self, *paths):
""" Get a path within the build directory. """
return os.path.join(self._dirs.build, *paths)
def get_repo_path(self, *paths):
""" Get a path within the repo directory. """
return os.path.join(self._dirs.repo, *paths)
def get_install_path(self, *paths):
""" Get a path within the install directory. """
return os.path.join(self._dirs.install, *paths)
class Build(object):
debug = False
_cmd = lambda self, *args, **kwargs: cmd(*args, debug=self.debug, **kwargs)
def __init__(self, workspace, deploy, deploy_type):
"""
        :param workspace:
The workspace to build in.
        :param deploy:
The definition of the deployment as loaded from the project's deploy
file.
        :param deploy_type:
The deploy type object describing the type of the deploy.
"""
self.workspace = workspace
self.deploy = deploy
self.deploy_type = deploy_type
self.venv_paths = create_venv_paths(workspace.get_path())
def build(self):
"""
Build the workspace. Gets everything into a state that is ready for
packaging.
"""
self.prepare_environment()
self.run_buildscript()
self.copy_files()
self.freeze_virtualenv()
self.create_postinstall_script()
def prepare_environment(self):
"""
Prepare the workspace so that everything is ready to run the
buildscript including the virtualenv, build directory, and environment
variables.
"""
self.create_build_virtualenv()
self.workspace.make_build_dir()
self.put_env_variables()
def create_build_virtualenv(self):
""" Create a virtualenv for the build and install the dependencies. """
log('Creating virtualenv')
# Create clean virtualenv
if not os.path.exists(self.venv_paths.python):
self._cmd(['virtualenv', self.venv_paths.venv])
log('Upgrading pip')
self._cmd([self.venv_paths.pip, 'install', '--upgrade', 'pip'])
log('Installing pip dependencies')
# Install things
for dep in self.deploy.pip:
log('Installing %s' % (dep))
self._cmd([self.venv_paths.pip, 'install', '--upgrade', dep])
def put_env_variables(self):
""" Initialises the current working environment. """
env = {
'VENV': self.venv_paths.venv,
'PIP': self.venv_paths.pip,
'REPO': self.workspace.repo.name,
'BRANCH': self.workspace.repo.branch,
'WORKSPACE': self.workspace.get_path(),
'BUILDDIR': self.workspace.get_build_path(),
'INSTALLDIR': self.workspace.get_install_path(),
'NAME': self.deploy.name,
'PATH': ':'.join([self.venv_paths.bin, os.getenv('PATH')])
}
for k, v in env.items():
os.environ[k] = v
def run_buildscript(self):
"""
Run the buildscript for the project if one has been specified.
"""
if not self.deploy.buildscript:
return
buildscript_path = self.workspace.get_repo_path(
self.deploy.buildscript)
self._cmd(['chmod', 'a+x', buildscript_path])
        # Save the current directory and switch to the workspace before running the build script
old_cwd = os.getcwd()
os.chdir(self.workspace.get_path())
self._cmd([buildscript_path])
# Pop directory
os.chdir(old_cwd)
def copy_files(self):
""" Copy the build and nginx/supervisor config files. """
log('Preparing package')
self.workspace.make_package_dir()
self.copy_build()
self.copy_config_files()
def copy_build(self):
""" Copy build contents to install location. """
self.workspace.make_install_dir()
for directory in os.listdir(self.workspace.get_build_path()):
shutil.copytree(self.workspace.get_build_path(directory),
self.workspace.get_install_path(directory))
def copy_config_files(self):
"""
Copy the config files specified in the deploy over to the relevant
config directory within the package.
"""
for config_files in self.deploy.config_files:
config_dir_path = self.workspace.get_package_path(
config_files.config_dir_path)
os.makedirs(config_dir_path)
for config_file in config_files.files:
shutil.copy(self.workspace.get_build_path(config_file),
config_dir_path)
def freeze_virtualenv(self):
""" Freeze post build requirements. """
freeze_output = self._cmd([self.venv_paths.pip, 'freeze'])
requirements_path = self.workspace.get_install_path(
'%s-requirements.pip' % self.deploy.name)
with open(requirements_path, 'w') as requirements_file:
requirements_file.write(freeze_output)
def create_postinstall_script(self):
""" Generate the postinstall script and write it to disk. """
content = self.generate_postinstall_script()
if self.debug:
log(content)
self.write_postinstall_script(content)
def generate_postinstall_script(self):
""" Generate the contents of the postinstall script. """
log('Constructing postinstall script')
# Insert some scripting before the user's script to set up...
set_up = self.deploy_type.get_set_up_script(
self.workspace, self.deploy)
# ...and afterwards to tear down.
tear_down = self.deploy_type.get_tear_down_script()
user_postinstall = ''
if self.deploy.postinstall:
user_postinstall = self.read_postinstall_file()
return """#!/bin/bash
{set_up}
INSTALLDIR={installdir}
REPO={repo}
BRANCH={branch}
NAME={name}
{user_postinstall}
{tear_down}
""".format(
set_up=set_up,
tear_down=tear_down,
installdir=self.workspace.get_install_path(),
repo=self.workspace.repo.name,
branch=self.workspace.repo.branch,
name=self.deploy.name,
user_postinstall=user_postinstall)
def read_postinstall_file(self):
""" Read the user's postinstall file. """
postinstall_path = self.workspace.get_repo_path(
self.deploy.postinstall)
with open(postinstall_path) as postinstall_file:
return postinstall_file.read()
def write_postinstall_script(self, content):
""" Write the final postinstall script. """
postinstall_path = self.workspace.get_path('postinstall.sh')
with open(postinstall_path, 'w') as postinstall_file:
postinstall_file.write(content)
os.chmod(postinstall_path, 0755)
class Package(object):
debug = False
sign = True
_cmd = lambda self, *args, **kwargs: cmd(*args, debug=self.debug, **kwargs)
def __init__(self, workspace, deploy, deploy_type, target='deb',
gpg_key=None):
self.workspace = workspace
self.deploy = deploy
self.deploy_type = deploy_type
self.target = target
self.gpg_key = gpg_key
def package(self):
self.run_fpm()
self.sign_debs()
def run_fpm(self):
""" Run the fpm command that builds the package. """
log('Building .%s package' % self.target)
fpm = [
'fpm',
'-C', self.workspace.get_package_path(),
'-p', self.workspace.get_package_path(),
'-s', self.deploy_type.fpm_deploy_type,
'-t', self.target,
'-a', 'amd64',
'-n', self.deploy.name,
'--after-install', self.workspace.get_path('postinstall.sh'),
]
if not self.deploy_type.provides_version:
fpm += ['-v', self.deploy.version]
fpm += sum([['-d', dep] for dep in self.list_all_dependencies()], [])
if self.deploy.user:
fpm += ['--%s-user' % self.target, self.deploy.user]
if self.debug:
fpm.append('--debug')
fpm += self.deploy_type.get_fpm_args(self.workspace._dirs)
self._cmd(fpm)
log('Build completed successfully')
def list_all_dependencies(self):
""" Get a list of all the package dependencies. """
deps = []
# Dependencies defined in the deploy file
deps += self.deploy.dependencies
# Dependencies from the deployment type
deps += self.deploy_type.dependencies
# Dependencies from the config files
for config_files in self.deploy.config_files:
deps += config_files.dependencies
return deps
def sign_debs(self):
""" Sign the .deb file with the configured gpg key. """
if self.gpg_key is None:
log('No GPG key configured, skipping signing')
return
log('Signing package')
# Find all the .debs in the directory and indiscriminately sign them
# (there should only be 1)
# TODO: Get the actual package name from fpm
debs = [path for path in listdir_abs(self.workspace.get_package_path())
if os.path.splitext(path)[1] == '.deb']
for deb in debs:
self._cmd(
['dpkg-sig', '-k', self.gpg_key, '--sign', 'builder', deb])
class Sideloader(object):
def __init__(self, config_path, github_url, branch=None, workspace_id=None,
debug=False):
self.config = Config.from_config_file(config_path)
self.repo = self._create_git_repo(github_url, branch)
self.workspace_id = (workspace_id if workspace_id is not None
else self.repo.name)
self.debug = debug
def _create_git_repo(self, github_url, branch):
branch = branch if branch is not None else self.config.default_branch
return GitRepo.from_github_url(github_url, branch)
def run(self, deploy_file='.deploy.yaml', dtype='virtualenv', target='deb',
build_num=None, sign=True, **deploy_overrides):
workspace = self._create_workspace()
workspace.set_up()
deploy = self._load_deploy(workspace, deploy_file, build_num,
**deploy_overrides)
deploy_type = self._get_deploy_type(dtype)
build = self._create_build(workspace, deploy, deploy_type)
build.build()
package = self._create_package(workspace, deploy, deploy_type, target,
sign)
package.package()
def _create_workspace(self):
workspace = Workspace(self.workspace_id, self.config.workspace_base,
self.config.install_location, self.repo)
workspace.debug = self.debug
return workspace
def _load_deploy(self, workspace, deploy_file, build_num,
**deploy_overrides):
if 'version' not in deploy_overrides:
if build_num is None:
build_num = 1
deploy_overrides['version'] = '0.%s' % build_num
deploy = workspace.load_deploy(deploy_file)
deploy = deploy.override(**deploy_overrides)
return deploy
def _get_deploy_type(self, deploy_type_str):
if deploy_type_str == 'python':
return deploy_types.Python()
elif deploy_type_str == 'virtualenv':
return deploy_types.VirtualEnv()
return deploy_types.DeployType()
def _create_build(self, workspace, deploy, deploy_type):
build = Build(workspace, deploy, deploy_type)
build.debug = self.debug
return build
def _create_package(self, workspace, deploy, deploy_type, target, sign):
package = Package(workspace, deploy, deploy_type, target,
self.config.gpg_key)
package.sign = sign
package.debug = self.debug
return package
class Config(object):
"""
Container class for Sideloader config, typically loaded from 'config.yaml'.
"""
def __init__(self, install_location, default_branch, workspace_base,
gpg_key):
self.install_location = install_location
self.default_branch = default_branch
self.workspace_base = workspace_base
self.gpg_key = gpg_key
@classmethod
def from_config_file(cls, config_file_path):
with open(config_file_path) as config_file:
config_yaml = yaml.load(config_file)
return Config(
config_yaml['install_location'],
config_yaml.get('default_branch', 'develop'),
config_yaml.get('workspace_base', '/workspace'),
config_yaml.get('gpg_key')
)
class GitRepo(object):
def __init__(self, url, branch, name):
self.url = url
self.branch = branch
self.name = name
@classmethod
def from_github_url(cls, github_url, branch):
parse_result = urlparse(github_url)
path_segments = parse_result.path.strip('/').split('/')
name = path_segments[1].rstrip('.git')
return GitRepo(github_url, branch, name)
class Deploy(object):
def __init__(self, name=None, buildscript=None, postinstall=None,
config_files=[], pip=[], dependencies=[],
virtualenv_prefix=None, allow_broken_build=False, user=None,
version=None):
"""
        Container class for deploy preferences, typically loaded from the
project's '.deploy.yaml' file.
"""
self.name = name
self.buildscript = buildscript
self.postinstall = postinstall
self.config_files = config_files
self.pip = pip
self.dependencies = dependencies
self.virtualenv_prefix = virtualenv_prefix
self.allow_broken_build = allow_broken_build
self.user = user
self.version = version
@classmethod
def from_deploy_file(cls, deploy_file_path):
with open(deploy_file_path) as deploy_file:
deploy_yaml = yaml.load(deploy_file)
config_files = []
nginx_files = deploy_yaml.get('nginx')
if nginx_files:
config_files.append(ConfigFiles.nginx(nginx_files))
supervisor_files = deploy_yaml.get('supervisor')
if supervisor_files:
config_files.append(ConfigFiles.supervisor(supervisor_files))
return Deploy(
deploy_yaml.get('name'),
deploy_yaml.get('buildscript'),
deploy_yaml.get('postinstall'),
config_files,
deploy_yaml.get('pip', []),
deploy_yaml.get('dependencies'),
deploy_yaml.get('virtualenv_prefix'),
deploy_yaml.get('allow_broken_build', False),
deploy_yaml.get('user'),
deploy_yaml.get('version')
)
def override(self, **overrides):
"""
Override attributes in this Deploy instance and return a new instance
with the values given. Overrides with a None value will be ignored.
"""
attrs = ['name', 'buildscript', 'postinstall', 'config_files', 'pip',
'dependencies', 'virtualenv_prefix', 'allow_broken_build',
'user', 'version']
for override in overrides.keys():
if override not in attrs:
raise ValueError('Deploy has no attribute \'%s\'' % override)
kwargs = {}
for attr in attrs:
kwargs[attr] = getattr(self, attr)
if attr in overrides:
value = overrides[attr]
if value is not None:
kwargs[attr] = value
return Deploy(**kwargs)
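# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the module). The config path, repository
# URL and build number are placeholders; it assumes a config.yaml providing
# at least 'install_location' exists at the given path.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sideloader = Sideloader('/etc/sideloader/config.yaml',
                            'https://github.com/example-org/example-repo.git',
                            branch='develop')
    sideloader.run(dtype='virtualenv', target='deb', build_num=42)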
|
mit
|
Geode/geonode
|
geonode/layers/populate_layers_data.py
|
18
|
6643
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.layers.models import Style, Attribute, Layer
styles = [{"name": "test_style_1",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" \
version=\"1.0.0\"><sld:NamedLayer><sld:Name>test_style_1</sld:Name><sld:UserStyle>\
<sld:Name>test_style_1</sld:Name><sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name>\
<sld:Rule><sld:PolygonSymbolizer><sld:Fill><sld:CssParameter name=\"fill\">#888800</sld:CssParameter>\
</sld:Fill><sld:Stroke><sld:CssParameter name=\"stroke\">#ffffbb</sld:CssParameter>\
<sld:CssParameter name=\"stroke-width\">0.7</sld:CssParameter></sld:Stroke>\
</sld:PolygonSymbolizer></sld:Rule></sld:FeatureTypeStyle></sld:UserStyle>\
</sld:NamedLayer></sld:StyledLayerDescriptor>",
},
{"name": "test_style_2",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" \
version=\"1.0.0\"><sld:NamedLayer><sld:Name>test_style_2</sld:Name><sld:UserStyle>\
<sld:Name>test_style_2</sld:Name><sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name>\
<sld:Rule><sld:PolygonSymbolizer><sld:Fill><sld:CssParameter name=\"fill\">#888800</sld:CssParameter>\
</sld:Fill><sld:Stroke><sld:CssParameter name=\"stroke\">#ffffbb</sld:CssParameter>\
<sld:CssParameter name=\"stroke-width\">0.7</sld:CssParameter></sld:Stroke></sld:PolygonSymbolizer>\
</sld:Rule></sld:FeatureTypeStyle></sld:UserStyle></sld:NamedLayer></sld:StyledLayerDescriptor>",
},
{"name": "test_style_3",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" \
version=\"1.0.0\"><sld:NamedLayer><sld:Name>test_style_3</sld:Name><sld:UserStyle>\
<sld:Name>test_style_3</sld:Name><sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name>\
<sld:Rule><sld:PolygonSymbolizer><sld:Fill><sld:CssParameter name=\"fill\">#888800</sld:CssParameter>\
</sld:Fill><sld:Stroke><sld:CssParameter name=\"stroke\">#ffffbb</sld:CssParameter><sld:CssParameter \
name=\"stroke-width\">0.7</sld:CssParameter></sld:Stroke></sld:PolygonSymbolizer></sld:Rule>\
</sld:FeatureTypeStyle></sld:UserStyle></sld:NamedLayer></sld:StyledLayerDescriptor>",
},
{"name": "Evaluación",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\
<sld:NamedLayer><sld:Name>test_style_3</sld:Name><sld:UserStyle><sld:Name>test_style_3</sld:Name>\
<sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name><sld:Rule><sld:PolygonSymbolizer><sld:Fill>\
<sld:CssParameter name=\"fill\">#888800</sld:CssParameter></sld:Fill><sld:Stroke><sld:CssParameter \
name=\"stroke\">#ffffbb</sld:CssParameter><sld:CssParameter name=\"stroke-width\">0.7</sld:CssParameter>\
</sld:Stroke></sld:PolygonSymbolizer></sld:Rule></sld:FeatureTypeStyle></sld:UserStyle></sld:NamedLayer>\
</sld:StyledLayerDescriptor>",
}]
attributes = [
{
"attribute": u'N\xfamero_De_M\xe9dicos',
"attribute_label": u'N\xfamero_De_M\xe9dicos',
"attribute_type": "xsd:string",
"visible": True,
"display_order": 4
},
{
"attribute": "the_geom",
"attribute_label": "Shape",
"attribute_type": "gml:Geometry",
"visible": False,
"display_order": 3
},
{
"attribute": "description",
"attribute_label": "Description",
"attribute_type": "xsd:string",
"visible": True,
"display_order": 2
},
{
"attribute": "place_name",
"attribute_label": "Place Name",
"attribute_type": "xsd:string",
"visible": True,
"display_order": 1
}
]
def create_layer_data():
layer = Layer.objects.get(pk=1)
for style in styles:
new_style = Style.objects.create(
name=style['name'],
sld_url=style['sld_url'],
sld_body=style['sld_body'])
layer.styles.add(new_style)
layer.default_style = new_style
layer.save()
for attr in attributes:
Attribute.objects.create(layer=layer,
attribute=attr['attribute'],
attribute_label=attr['attribute_label'],
attribute_type=attr['attribute_type'],
visible=attr['visible'],
display_order=attr['display_order']
)
|
gpl-3.0
|
minepy/mictools
|
mictools/utils.py
|
1
|
3005
|
## Copyright 2017 MICtools Developers <[email protected]>
## This file is part of MICtools.
##
## MICtools is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## MICtools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with MICtools. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import minepy
def sstats(X, Y, alpha=0.6, c=15, est="mic_approx"):
mic, tic = [], []
mine = minepy.MINE(alpha=alpha, c=c, est=est)
for i in range(min(X.shape[0], Y.shape[0])):
mine.compute_score(X[i], Y[i])
mic.append(mine.mic())
tic.append(mine.tic(norm=True))
mic, tic = np.asarray(mic), np.asarray(tic)
return mic, tic
def read_table(input_fn):
"""Read table.
"""
table = pd.read_csv(input_fn, sep='\t', index_col=0)
# cast index into string
table.index = [str(elem) for elem in table.index]
return table
def check_data(X, Y=None, labels=None):
if not (Y is None):
if X.shape[1] != Y.shape[1]:
raise ValueError("different number of samples between X and Y")
if not (X.columns == Y.columns).all():
raise ValueError("sample names mismatch between X and Y")
if not (labels is None):
if X.shape[1] != labels.shape[0]:
raise ValueError("different number of samples between X and labels")
if not (X.columns == labels.index).all():
raise ValueError("sample names mismatch between X and labels")
def read_data(xvars_fn, labels_fn=None, target=None, yvars_fn=None):
X = read_table(xvars_fn)
sample_ids = sorted(X.columns)
labels = None
Y = None
if not labels_fn is None:
if target is None:
raise ValueError("target (labels file column) is required when the "
"labels file is provided")
labels_df = read_table(labels_fn)
if not (target in labels_df.columns):
raise Exception("target {} is not in the labels file".\
format(target))
labels_df = labels_df.loc[labels_df[target].notnull(), target]
labels = labels_df.astype(str)
sample_ids = sorted(set(sample_ids) & set(labels.index))
if not yvars_fn is None:
Y = read_table(yvars_fn)
sample_ids = sorted(set(sample_ids) & set(Y.columns))
Y = Y[sample_ids]
X = X[sample_ids]
if not labels_fn is None:
labels = labels[sample_ids]
return X, labels, Y
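# ---------------------------------------------------------------------------
# Minimal sketch (not part of the module): compute MIC/TIC statistics between
# matching rows of two random matrices. The shapes and noise level are
# arbitrary; minepy must be installed, as for sstats() itself.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(5, 200)
    Y = X + 0.1 * rng.randn(5, 200)
    mic, tic = sstats(X, Y)
    for i, (m, t) in enumerate(zip(mic, tic)):
        print("row {}: MIC={:.3f} TIC={:.3f}".format(i, m, t))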
|
gpl-3.0
|
anntzer/scipy
|
scipy/optimize/_shgo_lib/triangulation.py
|
11
|
21463
|
import numpy as np
import copy
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# e.g., the initial cell is stored in self.H[0]
# 1st get new cells are stored in self.H[1] etc.
# When a cell is subgenerated it is removed from this list
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
        # TODO: Assign functions to the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the N-D hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.HC.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
def graph_map(self):
""" Make a list of size 2**n + 1 where an entry is a vertex
        incidence; each list element contains a list of indexes
        corresponding to that entry's neighbors"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
v_o = np.array(origin)
v_s = np.array(supremum)
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple((v_o + v_s) * .5)
# Build new indexed vertex list
V_new = []
for i, v in enumerate(self.C0()[:-1]):
v_x = np.array(v.x)
sub_cell_t1 = v_o - v_o * v_x
sub_cell_t2 = v_s * v_x
vec = sub_cell_t1 + sub_cell_t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
        # Append the new cell to the complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
        with symmetric constraints.
        The longest edge is tracked by an ordering of the
        vertices in every simplex; the edge between the first and second
        vertices is the longest edge to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect new vertices to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
# Plots
def plot_complex(self):
"""
Here, C is the LIST of simplexes S in the
2- or 3-D complex
To plot a single simplex S in a set C, use e.g., [C[0]]
"""
from matplotlib import pyplot # type: ignore[import]
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class VertexGroup:
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
cell and its parent group
"""
if self.hg_d is None:
            self.hg_d = self.hg_n - self.p_hgr
        return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
cell based information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super().__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super().__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
        # Note: a Vertex is only instantiated once for each x, so the
        # objective function is only evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbors"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
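# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the module): triangulate the unit square
# for a simple quadratic objective and refine the complex once. The objective
# and bounds are only illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def objective(x):
        return (x[0] - 0.3) ** 2 + (x[1] - 0.7) ** 2
    hc = Complex(2, objective, bounds=[(0.0, 1.0), (0.0, 1.0)])
    hc.split_generation()  # subdivide every cell of the current generation
    print("vertices evaluated so far:", hc.V.size)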
|
bsd-3-clause
|
acsone/server-tools
|
users_ldap_push/models/res_partner.py
|
16
|
1251
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.multi
def write(self, vals):
result = super(ResPartner, self).write(vals)
self.filtered('user_ids.is_ldap_user').mapped('user_ids')\
.push_to_ldap(vals)
return result
|
agpl-3.0
|
socialplanning/opencore
|
opencore/browser/topnav/topnav.py
|
1
|
3065
|
"""
TopNav view classes.
"""
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from Products.TeamSpace.permissions import ManageTeamMembership
from opencore.browser.base import BaseView
from opencore.browser.topnav.interfaces import ITopnavMenuItems
from opencore.i18n import _
from opencore.interfaces.message import ITransientMessage
from opencore.nui.contexthijack import HeaderHijackable
from opencore.content.page import OpenPage
from operator import itemgetter
from zope.component import getMultiAdapter
class TopNavView(HeaderHijackable):
"""
Provides req'd information for rendering top nav in any context.
"""
def contextmenu(self):
"""ask a viewlet manager for the context menu
HeaderHijackable takes care of making sure that the context
is set correctly if we are getting a request with certain
headers set to specify the context"""
manager = getMultiAdapter(
(self.context, self.request, self),
ITopnavMenuItems,
name="opencore.topnavmenu")
manager.update()
return manager.render()
@property
def usermenu(self):
if self.loggedin:
viewname = 'topnav-auth-user-menu'
else:
viewname = 'topnav-anon-user-menu'
view = getMultiAdapter((self.context, self.request), name=viewname)
return view.__of__(aq_inner(self.context))
class AnonMenuView(BaseView):
"""
View class for the user menu when user is anonymous.
"""
@property
def menudata(self):
site_url = getToolByName(self.context, 'portal_url')()
menudata = (
{'content': _(u'Sign in'),
'href': '%s/login' % site_url,
},
{'content': _(u'Create account'),
'href': '%s/join' % site_url,
},
)
return menudata
class AuthMenuView(BaseView):
"""
View class for the user menu when user is logged in.
"""
@property
def user_message_count(self):
"""
returns the number of transient messages currently stored
for the logged in member
"""
mem_id = self.loggedinmember.getId()
tm = ITransientMessage(self.portal)
t_msgs = tm.get_all_msgs(mem_id)
msg_count = sum([len(value) for key,value in t_msgs.iteritems() if not key == 'Trackback'])
query = dict(portal_type='OpenMembership',
getId=mem_id,
)
mship_brains = self.catalog(**query)
proj_invites = [brain for brain in mship_brains if brain.review_state == 'pending' and brain.lastWorkflowActor != mem_id]
return msg_count + len(proj_invites)
@property
def menudata(self):
mem_data = self.member_info
site_url = getToolByName(self.context, 'portal_url')()
menudata = (
{'content': _(u'Sign out'),
'href': '%s/logout' % site_url,
},
)
return menudata
|
gpl-3.0
|
ActiveState/code
|
recipes/Python/577217_Routines_programmatically_authenticating_Google_/recipe-577217.py
|
1
|
6516
|
"""
Routines for programmatically authenticating with the Google Accounts system at
Google App-Engine.
This takes two calls, one to the ClientLogin service of Google Accounts,
and then a second to the login frontend of App Engine.
User credentials are provided to the first, which responds with a token.
Passing that token to the _ah/login GAE endpoint then gives the cookie that can
be used to make further authenticated requests.
Give the ACSID cookie to the client so it stays logged in with the GAE integrated users
system.
One last issue: after successful authentication the current user's ID is still
missing; User(email).user_id() won't work. Here I think an HTTP redirect
should make the client re-request (using the cookie) and login, but the client
would need to support that. Alternatively the ID can be fetched within the
current request by a r/w round trip to the datastore, see:
http://stackoverflow.com/questions/816372/how-can-i-determine-a-user-id-based-on-an-email-address-in-app-engine
See also: http://markmail.org/thread/tgth5vmdqjacaxbx
"""
import logging, md5, urllib, urllib2
def do_auth(appname, user, password, dev=False, admin=False):
"This is taken from bits of appcfg, specifically: "
" google/appengine/tools/appengine_rpc.py "
"It returns the cookie send by the App Engine Login "
"front-end after authenticating with Google Accounts. "
if dev:
return do_auth_dev_appserver(user, admin)
# get the token
try:
auth_token = get_google_authtoken(appname, user, password)
except AuthError, e:
if e.reason == "BadAuthentication":
logging.error( "Invalid username or password." )
if e.reason == "CaptchaRequired":
logging.error(
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
if e.reason == "NotVerified":
logging.error( "Account not verified.")
if e.reason == "TermsNotAgreed":
logging.error( "User has not agreed to TOS.")
if e.reason == "AccountDeleted":
logging.error( "The user account has been deleted.")
if e.reason == "AccountDisabled":
logging.error( "The user account has been disabled.")
if e.reason == "ServiceDisabled":
logging.error( "The user's access to the service has been "
"disabled.")
if e.reason == "ServiceUnavailable":
logging.error( "The service is not available; try again later.")
raise
# now get the cookie
cookie = get_gae_cookie(appname, auth_token)
assert cookie
return cookie
def do_auth_dev_appserver(email, admin):
"""Creates cookie payload data.
Args:
email, admin: Parameters to incorporate into the cookie.
Returns:
String containing the cookie payload.
"""
admin_string = 'False'
if admin:
admin_string = 'True'
if email:
user_id_digest = md5.new(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return 'dev_appserver_login="%s:%s:%s"; Path=/;' % (email, admin_string, user_id)
def get_gae_cookie(appname, auth_token):
"""
Send a token to the App Engine login, again stating the name of the
application to gain authentication for. Returned is a cookie that may be used
to authenticate HTTP traffic to the application at App Engine.
"""
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
host = "%s.appspot.com" % appname
url = "https://%s/_ah/login?%s" % (host,
urllib.urlencode(args))
opener = get_opener() # no redirect handler!
req = urllib2.Request(url)
try:
response = opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code,
response.msg, response.headers, response.fp)
cookie = response.headers.get('set-cookie')
assert cookie and cookie.startswith('ACSID')
return cookie.replace('; HttpOnly', '')
def get_google_authtoken(appname, email_address, password):
"""
Make secure connection to Google Accounts and retrieve an authorisation
token for the stated appname.
    The token can be sent to the login front-end at appengine using
get_gae_cookie(), which will return a cookie to use for the user session.
"""
opener = get_opener()
# get an AuthToken from Google accounts
auth_uri = 'https://www.google.com/accounts/ClientLogin'
authreq_data = urllib.urlencode({ "Email": email_address,
"Passwd": password,
"service": "ah",
"source": appname,
"accountType": "HOSTED_OR_GOOGLE" })
req = urllib2.Request(auth_uri, data=authreq_data)
try:
response = opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise AuthError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
class AuthError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
def get_opener(cookiejar=None):
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
opener.add_handler(urllib2.HTTPSHandler())
if cookiejar:
opener.add_handler(urllib2.HTTPCookieProcessor(cookiejar))
return opener
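# --- Usage sketch: not part of the original recipe. It illustrates the
# two-step flow described in the module docstring; the app name and the
# credentials below are placeholders, not real accounts.
if __name__ == '__main__':
    try:
        acsid_cookie = do_auth('example-app', 'user@example.com', 's3cret')
        print 'Pass this cookie on subsequent requests:', acsid_cookie
    except AuthError, err:
        print 'Authentication failed:', err.reason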
|
mit
|
SaganBolliger/nupic
|
src/nupic/encoders/multi.py
|
15
|
7688
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.encoders.base import Encoder
from nupic.encoders.scalar import ScalarEncoder
from nupic.encoders.adaptivescalar import AdaptiveScalarEncoder
from nupic.encoders.date import DateEncoder
from nupic.encoders.logenc import LogEncoder
from nupic.encoders.category import CategoryEncoder
from nupic.encoders.sdrcategory import SDRCategoryEncoder
from nupic.encoders.delta import DeltaEncoder
from nupic.encoders.scalarspace import ScalarSpaceEncoder
from nupic.encoders.pass_through_encoder import PassThroughEncoder
from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder
from nupic.encoders.coordinate import CoordinateEncoder
from nupic.encoders.geospatial_coordinate import GeospatialCoordinateEncoder
# multiencoder must be imported last because it imports * from this module!
from nupic.encoders.utils import bitsToString
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
# Map class to Cap'n Proto schema union attribute
_CLASS_ATTR_MAP = {
ScalarEncoder: "scalarEncoder",
AdaptiveScalarEncoder: "adaptiveScalarEncoder",
DateEncoder: "dateEncoder",
LogEncoder: "logEncoder",
CategoryEncoder: "categoryEncoder",
CoordinateEncoder: "coordinateEncoder",
SDRCategoryEncoder: "sdrCategoryEncoder",
DeltaEncoder: "deltaEncoder",
PassThroughEncoder: "passThroughEncoder",
SparsePassThroughEncoder: "sparsePassThroughEncoder",
RandomDistributedScalarEncoder: "randomDistributedScalarEncoder"
}
# Invert for fast lookup in MultiEncoder.read()
_ATTR_CLASS_MAP = {value:key for key, value in _CLASS_ATTR_MAP.items()}
class MultiEncoder(Encoder):
"""A MultiEncoder encodes a dictionary or object with
  multiple components. A MultiEncoder contains a number
of sub-encoders, each of which encodes a separate component."""
# TODO expand this docstring to explain how the multiple encoders are combined
def __init__(self, encoderDescriptions=None):
self.width = 0
self.encoders = []
self.description = []
self.name = ''
if encoderDescriptions is not None:
self.addMultipleEncoders(encoderDescriptions)
def setFieldStats(self, fieldName, fieldStatistics ):
for (name, encoder, offset) in self.encoders:
encoder.setFieldStats(name, fieldStatistics)
def addEncoder(self, name, encoder):
self.encoders.append((name, encoder, self.width))
for d in encoder.getDescription():
self.description.append((d[0], d[1] + self.width))
self.width += encoder.getWidth()
self._flattenedEncoderList = None
self._flattenedFieldTypeList = None
def encodeIntoArray(self, obj, output):
for name, encoder, offset in self.encoders:
encoder.encodeIntoArray(self._getInputValue(obj, name), output[offset:])
def getDescription(self):
return self.description
def getWidth(self):
"""Represents the sum of the widths of each fields encoding."""
return self.width
def setLearning(self,learningEnabled):
encoders = self.getEncoderList()
for encoder in encoders:
encoder.setLearning(learningEnabled)
return
def encodeField(self, fieldName, value):
for name, encoder, offset in self.encoders:
if name == fieldName:
return encoder.encode(value)
def encodeEachField(self, inputRecord):
encodings = []
for name, encoder, offset in self.encoders:
encodings.append(encoder.encode(getattr(inputRecord, name)))
return encodings
def addMultipleEncoders(self, fieldEncodings):
"""
fieldEncodings -- a dict of dicts, mapping field names to the field params
dict.
Each field params dict has the following keys
1) data fieldname that matches the key ('fieldname')
2) an encoder type ('type')
3) and the encoder params (all other keys)
For example,
fieldEncodings={
'dateTime': dict(fieldname='dateTime', type='DateEncoder',
timeOfDay=(5,5)),
'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
name='attendeeCount', minval=0, maxval=250,
clipInput=True, w=5, resolution=10),
'consumption': dict(fieldname='consumption',type='ScalarEncoder',
name='consumption', minval=0,maxval=110,
clipInput=True, w=5, resolution=5),
}
would yield a vector with a part encoded by the DateEncoder,
    and two parts separately handled by the ScalarEncoder with the specified parameters.
    The three separate encodings are then merged into the final vector, in such a way that
they are always at the same location within the vector.
"""
# Sort the encoders so that they end up in a controlled order
encoderList = sorted(fieldEncodings.items())
for key, fieldParams in encoderList:
if ':' not in key and fieldParams is not None:
fieldParams = fieldParams.copy()
fieldName = fieldParams.pop('fieldname')
encoderName = fieldParams.pop('type')
try:
self.addEncoder(fieldName, eval(encoderName)(**fieldParams))
except TypeError, e:
print ("#### Error in constructing %s encoder. Possibly missing "
"some required constructor parameters. Parameters "
"that were provided are: %s" % (encoderName, fieldParams))
raise
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.encoders = [None] * len(proto.encoders)
encoder.width = 0
for index, encoderProto in enumerate(proto.encoders):
# Identify which attr is set in union
encoderType = encoderProto.which()
encoderDetails = getattr(encoderProto, encoderType)
encoder.encoders[index] = (
encoderProto.name,
# Call class.read() where class is determined by _ATTR_CLASS_MAP
_ATTR_CLASS_MAP.get(encoderType).read(encoderDetails),
encoderProto.offset
)
encoder.width += encoder.encoders[index][1].getWidth()
# Derive description from encoder list
encoder.description = [(enc[1].name, enc[2]) for enc in encoder.encoders]
encoder.name = proto.name
return encoder
def write(self, proto):
proto.init("encoders", len(self.encoders))
for index, (name, encoder, offset) in enumerate(self.encoders):
encoderProto = proto.encoders[index]
encoderType = _CLASS_ATTR_MAP.get(encoder.__class__)
encoderProto.init(encoderType)
encoderDetails = getattr(encoderProto, encoderType)
encoder.write(encoderDetails)
encoderProto.name = name
encoderProto.offset = offset
proto.name = self.name
|
agpl-3.0
|
peoplepower/composer-sdk-python
|
com.ppc.Bot/devices/siren/siren_linkhigh.py
|
1
|
3469
|
'''
Created on March 3, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from devices.siren.siren import SirenDevice
# For the LinkHigh siren, send all 3 parameters simultaneously:
# ppc.alarmWarn = sound id to play
# ppc.alarmDuration = 1 is play once, 2+ is play for that many seconds
# ppc.alarmStrobe = 0 or 1 to turn the strobe light off or on.
class LinkhighSirenDevice(SirenDevice):
"""Siren"""
# List of Device Types this class is compatible with
DEVICE_TYPES = [9009]
# Sound library
SOUNDS = {
"alarm": 1,
"dog": 2,
"warning": 3,
"bling": 4,
"bird": 5,
"droid": 6,
"lock": 7,
"phaser": 8,
"doorbell": 9,
"guncock": 10,
"gunshot": 11,
"switch": 12,
"trumpet": 13,
"whistle": 14
}
def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
"""
Constructor
:param botengine:
:param device_id:
:param device_type:
:param device_description:
:param precache_measurements:
"""
SirenDevice.__init__(self, botengine, device_id, device_type, device_description, precache_measurements=precache_measurements)
# Last sound played
self.last_sound = None
def get_device_type_name(self, language):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Device type name
return _("Siren")
def get_image_name(self, botengine):
"""
:return: the font icon name of this device type
"""
return "siren"
#===========================================================================
# Commands
#===========================================================================
def squawk(self, botengine, warning=False):
"""
Squawk
:param warning: True for a little warning squawk, False for a more alarming squawk
"""
        # Honour the documented 'warning' flag: a softer warning squawk when
        # True, the more alarming siren sound when False.
        style = self.SOUNDS['warning'] if warning else self.SOUNDS['alarm']
        self.play_sound(botengine, style, False, 1)
def alarm(self, botengine, on):
"""
Sound the alarm
:param on: True for on, False for off
"""
if on:
self.last_sound = self.SOUNDS['alarm']
self.play_sound(botengine, self.SOUNDS['alarm'], True, 900)
else:
self.play_sound(botengine, self.SOUNDS['alarm'], False, 0)
def play_sound(self, botengine, sound_id, strobe, duration_sec):
"""
Squawk the given sound ID
:param botengine: BotEngine
:param sound_id: Sound ID to play
:param strobe: True to activate the strobe light
:param duration_sec: 1 = play once; 2+ = play this many seconds.
"""
param_sound = {
"name": "ppc.alarmWarn",
"value": int(sound_id)
}
param_strobe = {
"name": "ppc.alarmStrobe",
"value": int(strobe)
}
param_duration = {
"name": "ppc.alarmDuration",
"value": int(duration_sec)
}
botengine.send_commands(self.device_id, [param_sound, param_strobe, param_duration], command_timeout_ms=5000)
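# --- Usage sketch: not part of the original module. It shows the
# three-parameter command described at the top of this file; 'botengine' and
# 'device_id' are placeholders supplied by the surrounding bot framework.
#
#     siren = LinkhighSirenDevice(botengine, device_id, 9009, "LinkHigh Siren")
#     siren.play_sound(botengine, LinkhighSirenDevice.SOUNDS['doorbell'],
#                      strobe=True, duration_sec=3)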
|
apache-2.0
|
heuer/segno
|
tests/test_issue35_maskcheck.py
|
1
|
1114
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Tests against issue 35
<https://github.com/heuer/segno/issues/35>
"""
from __future__ import absolute_import, unicode_literals
import pytest
from segno import consts
from segno import encoder
@pytest.mark.parametrize('version, mask', [(consts.VERSION_M1, 4),
(1, 8),
(1, -1),
(consts.VERSION_M2, -1)])
def test_normalize_mask_illegal(version, mask):
with pytest.raises(ValueError) as ex:
encoder.normalize_mask(mask, version < 1)
assert 'Invalid data mask' in str(ex.value)
@pytest.mark.parametrize('version, mask', [(consts.VERSION_M1, 'A'),
(1, 'B')])
def test_normalize_mask_not_int(version, mask):
with pytest.raises(ValueError) as ex:
encoder.normalize_mask(mask, version < 1)
assert 'Invalid data mask' in str(ex.value)
if __name__ == '__main__':
pytest.main([__file__])
|
bsd-3-clause
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/nltk/stem/isri.py
|
7
|
15040
|
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: The ISRI Arabic Stemmer
#
# Copyright (C) 2001-2017 NLTK Project
# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005)
# Author: Hosam Algasaier <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
ISRI Arabic Stemmer
The algorithm for this stemmer is described in:
Taghva, K., Elkhoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary.
Information Science Research Institute. University of Nevada, Las Vegas, USA.
The Information Science Research Institute’s (ISRI) Arabic stemmer shares many features
with the Khoja stemmer. However, the main difference is that the ISRI stemmer does not use a root
dictionary. Also, if a root is not found, the ISRI stemmer returns a normalized form rather than
returning the original unmodified word.
Additional adjustments were made to improve the algorithm:
1- Adding 60 stop words.
2- Adding the pattern (تفاعيل) to ISRI pattern set.
3- The step 2 in the original algorithm was normalizing all hamza. This step is discarded because it
increases the word ambiguities and changes the original root.
"""
from __future__ import unicode_literals
import re
from nltk.stem.api import StemmerI
class ISRIStemmer(StemmerI):
'''
ISRI Arabic stemmer based on algorithm: Arabic Stemming without a root dictionary.
Information Science Research Institute. University of Nevada, Las Vegas, USA.
A few minor modifications have been made to ISRI basic algorithm.
See the source code of this module for more information.
isri.stem(token) returns Arabic root for the given token.
The ISRI Stemmer requires that all tokens have Unicode string types.
If you use Python IDLE on Arabic Windows you have to decode text first
using Arabic '1256' coding.
'''
def __init__(self):
# length three prefixes
self.p3 = ['\u0643\u0627\u0644', '\u0628\u0627\u0644',
'\u0648\u0644\u0644', '\u0648\u0627\u0644']
# length two prefixes
self.p2 = ['\u0627\u0644', '\u0644\u0644']
# length one prefixes
self.p1 = ['\u0644', '\u0628', '\u0641', '\u0633', '\u0648',
'\u064a', '\u062a', '\u0646', '\u0627']
# length three suffixes
self.s3 = ['\u062a\u0645\u0644', '\u0647\u0645\u0644',
'\u062a\u0627\u0646', '\u062a\u064a\u0646',
'\u0643\u0645\u0644']
# length two suffixes
self.s2 = ['\u0648\u0646', '\u0627\u062a', '\u0627\u0646',
'\u064a\u0646', '\u062a\u0646', '\u0643\u0645',
'\u0647\u0646', '\u0646\u0627', '\u064a\u0627',
'\u0647\u0627', '\u062a\u0645', '\u0643\u0646',
'\u0646\u064a', '\u0648\u0627', '\u0645\u0627',
'\u0647\u0645']
# length one suffixes
self.s1 = ['\u0629', '\u0647', '\u064a', '\u0643', '\u062a',
'\u0627', '\u0646']
# groups of length four patterns
self.pr4 = {0: ['\u0645'], 1: ['\u0627'],
2: ['\u0627', '\u0648', '\u064A'], 3: ['\u0629']}
# Groups of length five patterns and length three roots
self.pr53 = {0: ['\u0627', '\u062a'],
1: ['\u0627', '\u064a', '\u0648'],
2: ['\u0627', '\u062a', '\u0645'],
3: ['\u0645', '\u064a', '\u062a'],
4: ['\u0645', '\u062a'],
5: ['\u0627', '\u0648'],
6: ['\u0627', '\u0645']}
self.re_short_vowels = re.compile(r'[\u064B-\u0652]')
self.re_hamza = re.compile(r'[\u0621\u0624\u0626]')
self.re_initial_hamza = re.compile(r'^[\u0622\u0623\u0625]')
self.stop_words = ['\u064a\u0643\u0648\u0646',
'\u0648\u0644\u064a\u0633',
'\u0648\u0643\u0627\u0646',
'\u0643\u0630\u0644\u0643',
'\u0627\u0644\u062a\u064a',
'\u0648\u0628\u064a\u0646',
'\u0639\u0644\u064a\u0647\u0627',
'\u0645\u0633\u0627\u0621',
'\u0627\u0644\u0630\u064a',
'\u0648\u0643\u0627\u0646\u062a',
'\u0648\u0644\u0643\u0646',
'\u0648\u0627\u0644\u062a\u064a',
'\u062a\u0643\u0648\u0646',
'\u0627\u0644\u064a\u0648\u0645',
'\u0627\u0644\u0644\u0630\u064a\u0646',
'\u0639\u0644\u064a\u0647',
'\u0643\u0627\u0646\u062a',
'\u0644\u0630\u0644\u0643',
'\u0623\u0645\u0627\u0645',
'\u0647\u0646\u0627\u0643',
'\u0645\u0646\u0647\u0627',
'\u0645\u0627\u0632\u0627\u0644',
'\u0644\u0627\u0632\u0627\u0644',
'\u0644\u0627\u064a\u0632\u0627\u0644',
'\u0645\u0627\u064a\u0632\u0627\u0644',
'\u0627\u0635\u0628\u062d',
'\u0623\u0635\u0628\u062d',
'\u0623\u0645\u0633\u0649',
'\u0627\u0645\u0633\u0649',
'\u0623\u0636\u062d\u0649',
'\u0627\u0636\u062d\u0649',
'\u0645\u0627\u0628\u0631\u062d',
'\u0645\u0627\u0641\u062a\u0626',
'\u0645\u0627\u0627\u0646\u0641\u0643',
'\u0644\u0627\u0633\u064a\u0645\u0627',
'\u0648\u0644\u0627\u064a\u0632\u0627\u0644',
'\u0627\u0644\u062d\u0627\u0644\u064a',
'\u0627\u0644\u064a\u0647\u0627',
'\u0627\u0644\u0630\u064a\u0646',
'\u0641\u0627\u0646\u0647',
'\u0648\u0627\u0644\u0630\u064a',
'\u0648\u0647\u0630\u0627',
'\u0644\u0647\u0630\u0627',
'\u0641\u0643\u0627\u0646',
'\u0633\u062a\u0643\u0648\u0646',
'\u0627\u0644\u064a\u0647',
'\u064a\u0645\u0643\u0646',
'\u0628\u0647\u0630\u0627',
'\u0627\u0644\u0630\u0649']
def stem(self, token):
"""
Stemming a word token using the ISRI stemmer.
"""
        token = self.norm(token, 1)  # remove diacritics which represent Arabic short vowels
if token in self.stop_words:
return token # exclude stop words from being processed
token = self.pre32(token) # remove length three and length two prefixes in this order
token = self.suf32(token) # remove length three and length two suffixes in this order
token = self.waw(token) # remove connective ‘و’ if it precedes a word beginning with ‘و’
token = self.norm(token, 2) # normalize initial hamza to bare alif
# if 4 <= word length <= 7, then stem; otherwise, no stemming
if len(token) == 4: # length 4 word
token = self.pro_w4(token)
elif len(token) == 5: # length 5 word
token = self.pro_w53(token)
token = self.end_w5(token)
elif len(token) == 6: # length 6 word
token = self.pro_w6(token)
token = self.end_w6(token)
elif len(token) == 7: # length 7 word
token = self.suf1(token)
if len(token) == 7:
token = self.pre1(token)
if len(token) == 6:
token = self.pro_w6(token)
token = self.end_w6(token)
return token
def norm(self, word, num=3):
"""
normalization:
num=1 normalize diacritics
num=2 normalize initial hamza
num=3 both 1&2
"""
if num == 1:
word = self.re_short_vowels.sub('', word)
elif num == 2:
word = self.re_initial_hamza.sub('\u0627', word)
elif num == 3:
word = self.re_short_vowels.sub('', word)
word = self.re_initial_hamza.sub('\u0627', word)
return word
def pre32(self, word):
"""remove length three and length two prefixes in this order"""
if len(word) >= 6:
for pre3 in self.p3:
if word.startswith(pre3):
return word[3:]
if len(word) >= 5:
for pre2 in self.p2:
if word.startswith(pre2):
return word[2:]
return word
def suf32(self, word):
"""remove length three and length two suffixes in this order"""
if len(word) >= 6:
for suf3 in self.s3:
if word.endswith(suf3):
return word[:-3]
if len(word) >= 5:
for suf2 in self.s2:
if word.endswith(suf2):
return word[:-2]
return word
def waw(self, word):
"""remove connective ‘و’ if it precedes a word beginning with ‘و’ """
if len(word) >= 4 and word[:2] == '\u0648\u0648':
word = word[1:]
return word
def pro_w4(self, word):
"""process length four patterns and extract length three roots"""
if word[0] in self.pr4[0]: # مفعل
word = word[1:]
elif word[1] in self.pr4[1]: # فاعل
word = word[:1] + word[2:]
elif word[2] in self.pr4[2]: # فعال - فعول - فعيل
word = word[:2] + word[3]
elif word[3] in self.pr4[3]: # فعلة
word = word[:-1]
else:
            word = self.suf1(word)  # do - normalize short suffix
if len(word) == 4:
word = self.pre1(word) # do - normalize short prefix
return word
def pro_w53(self, word):
"""process length five patterns and extract length three roots"""
if word[2] in self.pr53[0] and word[0] == '\u0627': # افتعل - افاعل
word = word[1] + word[3:]
elif word[3] in self.pr53[1] and word[0] == '\u0645': # مفعول - مفعال - مفعيل
word = word[1:3] + word[4]
elif word[0] in self.pr53[2] and word[4] == '\u0629': # مفعلة - تفعلة - افعلة
word = word[1:4]
elif word[0] in self.pr53[3] and word[2] == '\u062a': # مفتعل - يفتعل - تفتعل
word = word[1] + word[3:]
elif word[0] in self.pr53[4] and word[2] == '\u0627': # مفاعل - تفاعل
word = word[1] + word[3:]
elif word[2] in self.pr53[5] and word[4] == '\u0629': # فعولة - فعالة
word = word[:2] + word[3]
elif word[0] in self.pr53[6] and word[1] == '\u0646': # انفعل - منفعل
word = word[2:]
elif word[3] == '\u0627' and word[0] == '\u0627': # افعال
word = word[1:3] + word[4]
elif word[4] == '\u0646' and word[3] == '\u0627': # فعلان
word = word[:3]
elif word[3] == '\u064a' and word[0] == '\u062a': # تفعيل
word = word[1:3] + word[4]
elif word[3] == '\u0648' and word[1] == '\u0627': # فاعول
word = word[0] + word[2] + word[4]
elif word[2] == '\u0627' and word[1] == '\u0648': # فواعل
word = word[0] + word[3:]
elif word[3] == '\u0626' and word[2] == '\u0627': # فعائل
word = word[:2] + word[4]
elif word[4] == '\u0629' and word[1] == '\u0627': # فاعلة
word = word[0] + word[2:4]
elif word[4] == '\u064a' and word[2] == '\u0627': # فعالي
word = word[:2] + word[3]
else:
            word = self.suf1(word)  # do - normalize short suffix
if len(word) == 5:
word = self.pre1(word) # do - normalize short prefix
return word
def pro_w54(self, word):
"""process length five patterns and extract length four roots"""
if word[0] in self.pr53[2]: # تفعلل - افعلل - مفعلل
word = word[1:]
elif word[4] == '\u0629': # فعللة
word = word[:4]
elif word[2] == '\u0627': # فعالل
word = word[:2] + word[3:]
return word
def end_w5(self, word):
"""ending step (word of length five)"""
if len(word) == 4:
word = self.pro_w4(word)
elif len(word) == 5:
word = self.pro_w54(word)
return word
def pro_w6(self, word):
"""process length six patterns and extract length three roots"""
if word.startswith('\u0627\u0633\u062a') or word.startswith('\u0645\u0633\u062a'): # مستفعل - استفعل
word = word[3:]
elif word[0] == '\u0645' and word[3] == '\u0627' and word[5] == '\u0629': # مفعالة
word = word[1:3] + word[4]
elif word[0] == '\u0627' and word[2] == '\u062a' and word[4] == '\u0627': # افتعال
word = word[1] + word[3] + word[5]
elif word[0] == '\u0627' and word[3] == '\u0648' and word[2] == word[4]: # افعوعل
word = word[1] + word[4:]
elif word[0] == '\u062a' and word[2] == '\u0627' and word[4] == '\u064a': # تفاعيل new pattern
word = word[1] + word[3] + word[5]
else:
            word = self.suf1(word)  # do - normalize short suffix
if len(word) == 6:
word = self.pre1(word) # do - normalize short prefix
return word
def pro_w64(self, word):
"""process length six patterns and extract length four roots"""
if word[0] == '\u0627' and word[4] == '\u0627': # افعلال
word = word[1:4] + word[5]
elif word.startswith('\u0645\u062a'): # متفعلل
word = word[2:]
return word
def end_w6(self, word):
"""ending step (word of length six)"""
if len(word) == 5:
word = self.pro_w53(word)
word = self.end_w5(word)
elif len(word) == 6:
word = self.pro_w64(word)
return word
def suf1(self, word):
"""normalize short sufix"""
for sf1 in self.s1:
if word.endswith(sf1):
return word[:-1]
return word
def pre1(self, word):
"""normalize short prefix"""
for sp1 in self.p1:
if word.startswith(sp1):
return word[1:]
return word
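# --- Usage sketch: not part of the NLTK source. It stems a single Arabic
# token; the word below (كتابة, "writing") is an illustrative, editor-chosen
# example, not taken from the NLTK test suite.
if __name__ == '__main__':
    stemmer = ISRIStemmer()
    print(stemmer.stem('\u0643\u062a\u0627\u0628\u0629'))  # expected root: كتب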
|
apache-2.0
|
mm112287/2015cda_g8_0421
|
static/Brython3.1.0-20150301-090019/Lib/ui/widget.py
|
706
|
1774
|
import random
from browser import doc
def getMousePosition(e):
if e is None:
e=win.event
if e.pageX or e.pageY:
return {'x': e.pageX, 'y': e.pageY}
if e.clientX or e.clientY:
_posx=e.clientX + doc.body.scrollLeft + doc.documentElement.scrollLeft;
_posy=e.clientY + doc.body.scrollTop + doc.documentElement.scrollTop;
return {'x': _posx, 'y': _posy}
return {'x': 0, 'y': 0}
class Widget:
def __init__(self, element, type, id=None):
self._element=element
if id is None:
self._element.id='%s_%s' % (type, int(100000*random.random()))
else:
self._element.id=id
def get_id(self):
return self._element.id
def attach(self, element_id):
""" append this DOM component to DOM element element_id"""
#document[element_id] <= self._element #this doesn't work :(
#doc is actually the global 'doc' not the one we imported from browser :(
doc[element_id] <= self._element
def show(self):
self._element.display='block'
def hide(self):
self._element.display='none'
class DraggableWidget(Widget):
def __init__(self, element, type, id=None):
Widget.__init__(self, element, type, id)
def drag(e):
self._element.style.top='%spx' % (e.clientY - self._deltaY)
self._element.style.left='%spx' % (e.clientX - self._deltaX)
def mouseDown(e):
self._element.style.position='absolute'
self._deltaX=e.clientX - self._element.offsetLeft
self._deltaY=e.clientY - self._element.offsetTop
doc.bind('mousemove', drag)
def mouseUp(e):
doc.unbind('mousemove')
self._element.bind('mousedown', mouseDown)
self._element.bind('mouseup', mouseUp)
|
gpl-3.0
|
McNetic/CouchPotatoServer-de
|
libs/requests/packages/urllib3/util/retry.py
|
699
|
9924
|
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.MAX_BACKOFF`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
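# --- Usage sketch: not part of the upstream module. Each increment() call
# returns a new Retry object with updated counters, as the class docstring
# explains; the values below are arbitrary, editor-chosen examples.
if __name__ == '__main__':
    retry = Retry(total=2, backoff_factor=0.1)
    retry = retry.increment(method='GET', url='/',
                            error=ConnectTimeoutError('simulated timeout'))
    print(repr(retry))               # total is now 1
    print(retry.get_backoff_time())  # 0: the first observed error does not back off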
|
gpl-3.0
|
kevin8909/xjerp
|
openerp/addons/mrp_repair/__init__.py
|
64
|
1102
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_repair
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Pelagicore/qface
|
tests/test_tags.py
|
2
|
1797
|
from qface.generator import FileSystem
import logging
import logging.config
from path import Path
# logging.config.fileConfig('logging.ini')
logging.basicConfig()
log = logging.getLogger(__name__)
inputPath = Path('tests/in')
log.debug('input path folder: {0}'.format(inputPath.abspath()))
def loadTuner():
path = inputPath / 'com.pelagicore.ivi.tuner.qface'
return FileSystem.parse_document(path)
def test_tag():
system = loadTuner()
# lookup module
module = system.lookup('com.pelagicore.ivi.tuner')
assert module is module.lookup('com.pelagicore.ivi.tuner')
# lookup service
service = system.lookup('com.pelagicore.ivi.tuner.Tuner')
assert service is module.lookup('Tuner')
assert 'service' in service.tags
assert 'interface' in service.tags
# lookup struct
struct = system.lookup('com.pelagicore.ivi.tuner.Station')
assert struct is module.lookup('Station')
# lookup enum
enum = system.lookup('com.pelagicore.ivi.tuner.Waveband')
assert enum is module.lookup('Waveband')
assert 'default' in enum.tags
assert enum.attribute('default', 'value') == 'FM'
def test_meta_tags():
system = loadTuner()
interface = system.lookup('com.pelagicore.ivi.tuner.Tuner')
assert interface
assert 'port' in interface.tags
def test_flag():
system = loadTuner()
interface = system.lookup('com.pelagicore.ivi.tuner.Tuner')
assert interface
assert interface.attribute('config', 'private') is True
assert interface.attribute('config', 'a') == 'a' # use value from yaml
assert interface.attribute('config', 'b') == 'b' # use value from yaml
assert interface.attribute('config', 'c') == 'C' # use value from IDL
    assert interface.tags['data'] == [1, 2, 3]  # array annotation
|
mit
|
Pencroff/ai-hackathon-2017
|
Backend/venv/lib/python3.6/site-packages/requests/packages/chardet/cp949prober.py
|
2801
|
1782
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
        # not differ.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
mit
|
opensim-org/three.js
|
utils/exporters/blender/addons/io_three/exporter/texture.py
|
173
|
1407
|
from .. import constants, logger
from . import base_classes, image, api
class Texture(base_classes.BaseNode):
"""Class that wraps a texture node"""
def __init__(self, node, parent):
logger.debug("Texture().__init__(%s)", node)
base_classes.BaseNode.__init__(self, node, parent, constants.TEXTURE)
num = constants.NUMERIC
img_inst = self.scene.image(api.texture.file_name(self.node))
if not img_inst:
image_node = api.texture.image_node(self.node)
img_inst = image.Image(image_node.name, self.scene)
self.scene[constants.IMAGES].append(img_inst)
self[constants.IMAGE] = img_inst[constants.UUID]
wrap = api.texture.wrap(self.node)
self[constants.WRAP] = (num[wrap[0]], num[wrap[1]])
if constants.WRAPPING.REPEAT in wrap:
self[constants.REPEAT] = api.texture.repeat(self.node)
self[constants.ANISOTROPY] = api.texture.anisotropy(self.node)
self[constants.MAG_FILTER] = num[api.texture.mag_filter(self.node)]
self[constants.MIN_FILTER] = num[api.texture.min_filter(self.node)]
self[constants.MAPPING] = num[api.texture.mapping(self.node)]
@property
def image(self):
"""
:return: the image object of the current texture
:rtype: image.Image
"""
return self.scene.image(self[constants.IMAGE])
|
mit
|
nismod/energy_demand
|
energy_demand/plotting/fig_weather_variability_priod.py
|
1
|
4091
|
"""Plot stacked enduses for each submodel
"""
import numpy as np
import matplotlib.pyplot as plt
#from scipy.stats import mstats
import pandas as pd
from energy_demand.technologies import tech_related
from energy_demand.plotting import basic_plot_functions
def sum_all_enduses_fueltype(
data_enduses,
fueltype_str=False
):
"""Sum across all enduses and fueltypes
TODO
"""
y_values_enduse_yrs = {}
for year in data_enduses.keys():
for enduse in data_enduses[year].keys():
# Sum across all fueltypes for every hour
tot_across_fueltypes = data_enduses[year][enduse]
try:
y_values_enduse_yrs[year] += tot_across_fueltypes
except KeyError:
y_values_enduse_yrs[year] = tot_across_fueltypes
return y_values_enduse_yrs
def run(
data_input,
simulation_yr_to_plot,
period_h,
fueltype_str,
fig_name
):
"""
https://stackoverflow.com/questions/18313322/plotting-quantiles-median-and-spread-using-scipy-and-matplotlib
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.quantile.html
"""
# Select period and fueltype
fueltype_int = tech_related.get_fueltype_int(fueltype_str)
# -----------------------------------------------------------
# Iterate overall weather_yrs and store data in dataframe
# (columns = timestep, rows: value of year)
# -----------------------------------------------------------
    columns = period_h  # selected hours out of the 8760 hours of a year
# List of selected data for every weather year (which is then converted to array)
weather_yrs_data = []
print("Weather yrs: " + str(list(data_input.keys())), flush=True)
for weather_yr, data_weather_yr in data_input.items():
# Weather year specific data
data_input_fueltype = data_weather_yr[simulation_yr_to_plot][fueltype_int] # Select fueltype
data_input_reshape = data_input_fueltype.reshape(8760) # reshape
data_input_selection_hrs = data_input_reshape[period_h] # select period
weather_yrs_data.append(data_input_selection_hrs)
weather_yrs_data = np.array(weather_yrs_data)
# Create dataframe
df = pd.DataFrame(weather_yrs_data, columns=columns)
# Calculate quantiles
quantile_95 = 0.95
quantile_05 = 0.05
df_q_95 = df.quantile(quantile_95)
df_q_05 = df.quantile(quantile_05)
#Transpose for plotting purposes
df = df.T
df_q_95 = df_q_95.T
df_q_05 = df_q_05.T
fig = plt.figure() #(figsize = cm2inch(10,10))
ax = fig.add_subplot(111)
# 2015 weather year
data_2015 = data_weather_yr[2015][fueltype_int].reshape(8760)[period_h]
# ---------------
# Smoothing lines
# ---------------
try:
period_h_smoothed, df_q_95_smoothed = basic_plot_functions.smooth_data(period_h, df_q_95, num=40000)
period_h_smoothed, df_q_05_smoothed = basic_plot_functions.smooth_data(period_h, df_q_05, num=40000)
period_h_smoothed, data_2015_smoothed = basic_plot_functions.smooth_data(period_h, data_2015, num=40000)
except:
period_h_smoothed = period_h
df_q_95_smoothed = df_q_95
df_q_05_smoothed = df_q_05
data_2015_smoothed = data_2015
plt.plot(period_h_smoothed, data_2015_smoothed, color='tomato', linestyle='-', linewidth=2, label="2015 weather_yr")
plt.plot(period_h_smoothed, df_q_05_smoothed, color='black', linestyle='--', linewidth=0.5, label="0.05")
plt.plot(period_h_smoothed, df_q_95_smoothed, color='black', linestyle='--', linewidth=0.5, label="0.95")
# -----------------
# Uncertainty range
# -----------------
plt.fill_between(
period_h_smoothed, #x
df_q_95_smoothed, #y1
df_q_05_smoothed, #y2
alpha=.25,
facecolor="grey",
label="uncertainty band")
plt.legend(
prop={
'family':'arial',
'size': 10},
loc='best',
frameon=False,
shadow=True)
plt.show()
|
mit
|
mitodl/xanalytics
|
unidecode/x070.py
|
252
|
4693
|
data = (
'You ', # 0x00
'Yang ', # 0x01
'Lu ', # 0x02
'Si ', # 0x03
'Jie ', # 0x04
'Ying ', # 0x05
'Du ', # 0x06
'Wang ', # 0x07
'Hui ', # 0x08
'Xie ', # 0x09
'Pan ', # 0x0a
'Shen ', # 0x0b
'Biao ', # 0x0c
'Chan ', # 0x0d
'Mo ', # 0x0e
'Liu ', # 0x0f
'Jian ', # 0x10
'Pu ', # 0x11
'Se ', # 0x12
'Cheng ', # 0x13
'Gu ', # 0x14
'Bin ', # 0x15
'Huo ', # 0x16
'Xian ', # 0x17
'Lu ', # 0x18
'Qin ', # 0x19
'Han ', # 0x1a
'Ying ', # 0x1b
'Yong ', # 0x1c
'Li ', # 0x1d
'Jing ', # 0x1e
'Xiao ', # 0x1f
'Ying ', # 0x20
'Sui ', # 0x21
'Wei ', # 0x22
'Xie ', # 0x23
'Huai ', # 0x24
'Hao ', # 0x25
'Zhu ', # 0x26
'Long ', # 0x27
'Lai ', # 0x28
'Dui ', # 0x29
'Fan ', # 0x2a
'Hu ', # 0x2b
'Lai ', # 0x2c
'[?] ', # 0x2d
'[?] ', # 0x2e
'Ying ', # 0x2f
'Mi ', # 0x30
'Ji ', # 0x31
'Lian ', # 0x32
'Jian ', # 0x33
'Ying ', # 0x34
'Fen ', # 0x35
'Lin ', # 0x36
'Yi ', # 0x37
'Jian ', # 0x38
'Yue ', # 0x39
'Chan ', # 0x3a
'Dai ', # 0x3b
'Rang ', # 0x3c
'Jian ', # 0x3d
'Lan ', # 0x3e
'Fan ', # 0x3f
'Shuang ', # 0x40
'Yuan ', # 0x41
'Zhuo ', # 0x42
'Feng ', # 0x43
'She ', # 0x44
'Lei ', # 0x45
'Lan ', # 0x46
'Cong ', # 0x47
'Qu ', # 0x48
'Yong ', # 0x49
'Qian ', # 0x4a
'Fa ', # 0x4b
'Guan ', # 0x4c
'Que ', # 0x4d
'Yan ', # 0x4e
'Hao ', # 0x4f
'Hyeng ', # 0x50
'Sa ', # 0x51
'Zan ', # 0x52
'Luan ', # 0x53
'Yan ', # 0x54
'Li ', # 0x55
'Mi ', # 0x56
'Shan ', # 0x57
'Tan ', # 0x58
'Dang ', # 0x59
'Jiao ', # 0x5a
'Chan ', # 0x5b
'[?] ', # 0x5c
'Hao ', # 0x5d
'Ba ', # 0x5e
'Zhu ', # 0x5f
'Lan ', # 0x60
'Lan ', # 0x61
'Nang ', # 0x62
'Wan ', # 0x63
'Luan ', # 0x64
'Xun ', # 0x65
'Xian ', # 0x66
'Yan ', # 0x67
'Gan ', # 0x68
'Yan ', # 0x69
'Yu ', # 0x6a
'Huo ', # 0x6b
'Si ', # 0x6c
'Mie ', # 0x6d
'Guang ', # 0x6e
'Deng ', # 0x6f
'Hui ', # 0x70
'Xiao ', # 0x71
'Xiao ', # 0x72
'Hu ', # 0x73
'Hong ', # 0x74
'Ling ', # 0x75
'Zao ', # 0x76
'Zhuan ', # 0x77
'Jiu ', # 0x78
'Zha ', # 0x79
'Xie ', # 0x7a
'Chi ', # 0x7b
'Zhuo ', # 0x7c
'Zai ', # 0x7d
'Zai ', # 0x7e
'Can ', # 0x7f
'Yang ', # 0x80
'Qi ', # 0x81
'Zhong ', # 0x82
'Fen ', # 0x83
'Niu ', # 0x84
'Jiong ', # 0x85
'Wen ', # 0x86
'Po ', # 0x87
'Yi ', # 0x88
'Lu ', # 0x89
'Chui ', # 0x8a
'Pi ', # 0x8b
'Kai ', # 0x8c
'Pan ', # 0x8d
'Yan ', # 0x8e
'Kai ', # 0x8f
'Pang ', # 0x90
'Mu ', # 0x91
'Chao ', # 0x92
'Liao ', # 0x93
'Gui ', # 0x94
'Kang ', # 0x95
'Tun ', # 0x96
'Guang ', # 0x97
'Xin ', # 0x98
'Zhi ', # 0x99
'Guang ', # 0x9a
'Guang ', # 0x9b
'Wei ', # 0x9c
'Qiang ', # 0x9d
'[?] ', # 0x9e
'Da ', # 0x9f
'Xia ', # 0xa0
'Zheng ', # 0xa1
'Zhu ', # 0xa2
'Ke ', # 0xa3
'Zhao ', # 0xa4
'Fu ', # 0xa5
'Ba ', # 0xa6
'Duo ', # 0xa7
'Duo ', # 0xa8
'Ling ', # 0xa9
'Zhuo ', # 0xaa
'Xuan ', # 0xab
'Ju ', # 0xac
'Tan ', # 0xad
'Pao ', # 0xae
'Jiong ', # 0xaf
'Pao ', # 0xb0
'Tai ', # 0xb1
'Tai ', # 0xb2
'Bing ', # 0xb3
'Yang ', # 0xb4
'Tong ', # 0xb5
'Han ', # 0xb6
'Zhu ', # 0xb7
'Zha ', # 0xb8
'Dian ', # 0xb9
'Wei ', # 0xba
'Shi ', # 0xbb
'Lian ', # 0xbc
'Chi ', # 0xbd
'Huang ', # 0xbe
'[?] ', # 0xbf
'Hu ', # 0xc0
'Shuo ', # 0xc1
'Lan ', # 0xc2
'Jing ', # 0xc3
'Jiao ', # 0xc4
'Xu ', # 0xc5
'Xing ', # 0xc6
'Quan ', # 0xc7
'Lie ', # 0xc8
'Huan ', # 0xc9
'Yang ', # 0xca
'Xiao ', # 0xcb
'Xiu ', # 0xcc
'Xian ', # 0xcd
'Yin ', # 0xce
'Wu ', # 0xcf
'Zhou ', # 0xd0
'Yao ', # 0xd1
'Shi ', # 0xd2
'Wei ', # 0xd3
'Tong ', # 0xd4
'Xue ', # 0xd5
'Zai ', # 0xd6
'Kai ', # 0xd7
'Hong ', # 0xd8
'Luo ', # 0xd9
'Xia ', # 0xda
'Zhu ', # 0xdb
'Xuan ', # 0xdc
'Zheng ', # 0xdd
'Po ', # 0xde
'Yan ', # 0xdf
'Hui ', # 0xe0
'Guang ', # 0xe1
'Zhe ', # 0xe2
'Hui ', # 0xe3
'Kao ', # 0xe4
'[?] ', # 0xe5
'Fan ', # 0xe6
'Shao ', # 0xe7
'Ye ', # 0xe8
'Hui ', # 0xe9
'[?] ', # 0xea
'Tang ', # 0xeb
'Jin ', # 0xec
'Re ', # 0xed
'[?] ', # 0xee
'Xi ', # 0xef
'Fu ', # 0xf0
'Jiong ', # 0xf1
'Che ', # 0xf2
'Pu ', # 0xf3
'Jing ', # 0xf4
'Zhuo ', # 0xf5
'Ting ', # 0xf6
'Wan ', # 0xf7
'Hai ', # 0xf8
'Peng ', # 0xf9
'Lang ', # 0xfa
'Shan ', # 0xfb
'Hu ', # 0xfc
'Feng ', # 0xfd
'Chi ', # 0xfe
'Rong ', # 0xff
)
|
mit
|
shaswatsunder/aakashlabs-forum
|
ac/get_list.py
|
3
|
1609
|
# Get list of rc_id, rc_name, city etc. for search.
from ac.models import AakashCentre, Project
def get_ac_name_list(max_results=0, starts_with=''):
if starts_with:
lst = AakashCentre.objects.filter(active=True,
name__icontains=starts_with)
else:
lst = AakashCentre.objects.filter(active=True)
# if max_results > 0:
# if len(code_list) > max_results:
# code_list = code_list[:max_results]
return lst
def get_ac_id_list(max_results=0, starts_with=''):
if starts_with:
lst = AakashCentre.objects.filter(active=True,
ac_id__contains=starts_with)
else:
lst = AakashCentre.objects.filter(active=True)
return lst
def get_ac_city_list(max_results=0, starts_with=''):
if starts_with:
lst = AakashCentre.objects.filter(active=True,
city__icontains=starts_with)
else:
lst = AakashCentre.objects.filter(active=True)
return lst
def get_ac_state_list(max_results=0, starts_with=''):
if starts_with:
lst = AakashCentre.objects.filter(active=True,
state__icontains=starts_with)
else:
lst = AakashCentre.objects.filter(active=True)
return lst
def get_project_list(max_results=0, starts_with=''):
if starts_with:
lst = Project.objects.filter(approve=True,
name__icontains=starts_with)
else:
lst = Project.objects.filter(approve=True)
return lst
|
gpl-3.0
|
jkbradley/spark
|
examples/src/main/python/ml/fm_classifier_example.py
|
10
|
2846
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
FMClassifier Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import FMClassifier
from pyspark.ml.feature import MinMaxScaler, StringIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("FMClassifierExample") \
.getOrCreate()
# $example on$
# Load and parse the data file, converting it to a DataFrame.
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Scale features.
featureScaler = MinMaxScaler(inputCol="features", outputCol="scaledFeatures").fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
    # Train an FM model.
fm = FMClassifier(labelCol="indexedLabel", featuresCol="scaledFeatures", stepSize=0.001)
# Create a Pipeline.
pipeline = Pipeline(stages=[labelIndexer, featureScaler, fm])
# Train model.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test accuracy
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test set accuracy = %g" % accuracy)
fmModel = model.stages[2]
print("Factors: " + str(fmModel.factors))
print("Linear: " + str(fmModel.linear))
print("Intercept: " + str(fmModel.intercept))
# $example off$
spark.stop()
|
apache-2.0
|
endlessm/chromium-browser
|
native_client/pnacl/driver/shelltools.py
|
8
|
2070
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from driver_log import Log
import types
######################################################################
#
# Shell Utilities
#
######################################################################
class shell(object):
@staticmethod
def unescape(s):
w = shell.split(s)
if len(w) == 0:
return ''
if len(w) == 1:
return w[0]
# String was not properly escaped in the first place?
assert(False)
# TODO(pdox): Simplify this function by moving more of it into unescape
@staticmethod
def split(s):
"""Split a shell-style string up into a list of distinct arguments.
For example: split('cmd -arg1 -arg2="a b c"')
Returns ['cmd', '-arg1', '-arg2=a b c']
"""
assert(isinstance(s, types.StringTypes))
out = []
inspace = True
inquote = False
buf = ''
i = 0
while i < len(s):
if s[i] == '"':
inspace = False
inquote = not inquote
elif s[i] == ' ' and not inquote:
if not inspace:
out.append(buf)
buf = ''
inspace = True
elif s[i] == '\\':
if not i+1 < len(s):
Log.Fatal('Unterminated \\ escape sequence')
inspace = False
i += 1
buf += s[i]
else:
inspace = False
buf += s[i]
i += 1
if inquote:
Log.Fatal('Unterminated quote')
if not inspace:
out.append(buf)
return out
@staticmethod
def join(args):
"""Turn a list into a shell-style string For example:
shell.join([ 'a', 'b', 'c d e' ]) = 'a b "c d e"'
"""
return ' '.join([ shell.escape(a) for a in args ])
@staticmethod
def escape(s):
"""Shell-escape special characters in a string
Surround with quotes if necessary
"""
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
if ' ' in s:
s = '"' + s + '"'
return s
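# Minimal self-check (illustrative only, not part of the original driver),
# exercising the escape/split round trip described in the docstrings above.
if __name__ == '__main__':
    args = ['cmd', '-arg1', '-arg2=a b c']
    line = shell.join(args)  # 'cmd -arg1 "-arg2=a b c"'
    assert shell.split(line) == args
    assert shell.unescape(shell.escape('a b')) == 'a b'
    print('shelltools self-check passed')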
|
bsd-3-clause
|
DefyVentures/edx-platform
|
common/djangoapps/status/tests.py
|
15
|
3216
|
from django.conf import settings
from django.core.cache import cache
from django.test import TestCase
import os
from django.test.utils import override_settings
from tempfile import NamedTemporaryFile
import ddt
from .status import get_site_status_msg
# Get a name where we can put test files
TMP_FILE = NamedTemporaryFile(delete=False)
TMP_NAME = TMP_FILE.name
# Close it--we just want the path.
TMP_FILE.close()
@ddt.ddt
@override_settings(STATUS_MESSAGE_PATH=TMP_NAME)
class TestStatus(TestCase):
"""Test that the get_site_status_msg function does the right thing"""
no_file = None
invalid_json = """{
"global" : "Hello, Globe",
}"""
global_only = """{
"global" : "Hello, Globe"
}"""
toy_only = """{
"edX/toy/2012_Fall" : "A toy story"
}"""
global_and_toy = """{
"global" : "Hello, Globe",
"edX/toy/2012_Fall" : "A toy story"
}"""
# json to use, expected results for course=None (e.g. homepage),
# for toy course, for full course. Note that get_site_status_msg
# is supposed to return global message even if course=None. The
# template just happens to not display it outside the courseware
# at the moment...
checks = [
(no_file, None, None, None),
(invalid_json, None, None, None),
(global_only, "Hello, Globe", "Hello, Globe", "Hello, Globe"),
(toy_only, None, "A toy story", None),
(global_and_toy, "Hello, Globe", "Hello, Globe<br>A toy story", "Hello, Globe"),
]
def setUp(self):
"""
Fake course ids, since we don't have to have full django
settings (common tests run without the lms settings imported)
"""
self.full_id = 'edX/full/2012_Fall'
self.toy_id = 'edX/toy/2012_Fall'
def create_status_file(self, contents):
"""
Write contents to settings.STATUS_MESSAGE_PATH.
"""
with open(settings.STATUS_MESSAGE_PATH, 'w') as f:
f.write(contents)
def clear_status_cache(self):
"""
Remove the cached status message, if found
"""
if cache.get('site_status_msg') is not None:
cache.delete('site_status_msg')
def remove_status_file(self):
"""Delete the status file if it exists"""
if os.path.exists(settings.STATUS_MESSAGE_PATH):
os.remove(settings.STATUS_MESSAGE_PATH)
def tearDown(self):
self.remove_status_file()
@ddt.data(*checks)
@ddt.unpack
def test_get_site_status_msg(self, json_str, exp_none, exp_toy, exp_full):
"""run the tests"""
self.remove_status_file()
if json_str:
self.create_status_file(json_str)
for course_id, expected_msg in [(None, exp_none), (self.toy_id, exp_toy), (self.full_id, exp_full)]:
self.assertEqual(get_site_status_msg(course_id), expected_msg)
self.assertEqual(cache.get('site_status_msg'), expected_msg)
# check that `get_site_status_msg` works as expected when the cache
# is warmed, too
self.assertEqual(get_site_status_msg(course_id), expected_msg)
self.clear_status_cache()
|
agpl-3.0
|
Akasurde/ansible
|
lib/ansible/playbook/role/include.py
|
83
|
2605
|
# (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.role.definition import RoleDefinition
from ansible.playbook.role.requirement import RoleRequirement
from ansible.module_utils._text import to_native
__all__ = ['RoleInclude']
class RoleInclude(RoleDefinition):
"""
A derivative of RoleDefinition, used by playbook code when a role
is included for execution in a play.
"""
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager,
loader=loader, collection_list=collection_list)
@staticmethod
def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
raise AnsibleParserError("Invalid role definition: %s" % to_native(data))
if isinstance(data, string_types) and ',' in data:
raise AnsibleError("Invalid old style role requirement: %s" % data)
ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader, collection_list=collection_list)
return ri.load_data(data, variable_manager=variable_manager, loader=loader)
|
gpl-3.0
|
hhm0/supysonic
|
tests/test_manager_user.py
|
1
|
4845
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2013-2017 Alban 'spl0k' Féron
# 2017 Óscar García Amor
#
# Distributed under terms of the GNU AGPLv3 license.
from supysonic import db
from supysonic.managers.user import UserManager
import unittest
import uuid
class UserManagerTestCase(unittest.TestCase):
def setUp(self):
# Create an empty sqlite database in memory
self.store = db.get_store("sqlite:")
# Read schema from file
with open('schema/sqlite.sql') as sql:
schema = sql.read()
# Create tables on memory database
for command in schema.split(';'):
self.store.execute(command)
# Create some users
self.assertEqual(UserManager.add(self.store, 'alice', 'alice', '[email protected]', True), UserManager.SUCCESS)
self.assertEqual(UserManager.add(self.store, 'bob', 'bob', '[email protected]', False), UserManager.SUCCESS)
self.assertEqual(UserManager.add(self.store, 'charlie', 'charlie', '[email protected]', False), UserManager.SUCCESS)
def test_encrypt_password(self):
self.assertEqual(UserManager._UserManager__encrypt_password('password','salt'), ('59b3e8d637cf97edbe2384cf59cb7453dfe30789', 'salt'))
self.assertEqual(UserManager._UserManager__encrypt_password('pass-word','pepper'), ('d68c95a91ed7773aa57c7c044d2309a5bf1da2e7', 'pepper'))
def test_get_user(self):
# Get existing users
for name in ['alice', 'bob', 'charlie']:
user = self.store.find(db.User, db.User.name == name).one()
self.assertEqual(UserManager.get(self.store, user.id), (UserManager.SUCCESS, user))
# Get with invalid UUID
self.assertEqual(UserManager.get(self.store, 'invalid-uuid'), (UserManager.INVALID_ID, None))
# Non-existent user
self.assertEqual(UserManager.get(self.store, uuid.uuid4()), (UserManager.NO_SUCH_USER, None))
def test_add_user(self):
# Create duplicate
self.assertEqual(UserManager.add(self.store, 'alice', 'alice', '[email protected]', True), UserManager.NAME_EXISTS)
def test_delete_user(self):
# Delete existing users
for name in ['alice', 'bob', 'charlie']:
user = self.store.find(db.User, db.User.name == name).one()
self.assertEqual(UserManager.delete(self.store, user.id), UserManager.SUCCESS)
# Delete invalid UUID
self.assertEqual(UserManager.delete(self.store, 'invalid-uuid'), UserManager.INVALID_ID)
# Delete non-existent user
self.assertEqual(UserManager.delete(self.store, uuid.uuid4()), UserManager.NO_SUCH_USER)
def test_try_auth(self):
# Test authentication
for name in ['alice', 'bob', 'charlie']:
user = self.store.find(db.User, db.User.name == name).one()
self.assertEqual(UserManager.try_auth(self.store, name, name), (UserManager.SUCCESS, user))
# Wrong password
self.assertEqual(UserManager.try_auth(self.store, name, 'bad'), (UserManager.WRONG_PASS, None))
# Non-existent user
self.assertEqual(UserManager.try_auth(self.store, 'null', 'null'), (UserManager.NO_SUCH_USER, None))
def test_change_password(self):
# With existing users
for name in ['alice', 'bob', 'charlie']:
user = self.store.find(db.User, db.User.name == name).one()
            # Good password
self.assertEqual(UserManager.change_password(self.store, user.id, name, 'newpass'), UserManager.SUCCESS)
self.assertEqual(UserManager.try_auth(self.store, name, 'newpass'), (UserManager.SUCCESS, user))
# Wrong password
self.assertEqual(UserManager.change_password(self.store, user.id, 'badpass', 'newpass'), UserManager.WRONG_PASS)
# With invalid UUID
self.assertEqual(UserManager.change_password(self.store, 'invalid-uuid', 'oldpass', 'newpass'), UserManager.INVALID_ID)
# Non-existent user
self.assertEqual(UserManager.change_password(self.store, uuid.uuid4(), 'oldpass', 'newpass'), UserManager.NO_SUCH_USER)
def test_change_password2(self):
# With existing users
for name in ['alice', 'bob', 'charlie']:
self.assertEqual(UserManager.change_password2(self.store, name, 'newpass'), UserManager.SUCCESS)
user = self.store.find(db.User, db.User.name == name).one()
self.assertEqual(UserManager.try_auth(self.store, name, 'newpass'), (UserManager.SUCCESS, user))
# Non-existent user
self.assertEqual(UserManager.change_password2(self.store, 'null', 'newpass'), UserManager.NO_SUCH_USER)
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
|
foxwill/ol-api-tester
|
env/lib/python2.7/site-packages/requests/packages/chardet/latin1prober.py
|
1778
|
5232
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
/ total)
if confidence < 0.0:
confidence = 0.0
        # lower the confidence of latin1 so that other, more accurate
        # detectors can take priority.
confidence = confidence * 0.73
return confidence
|
gpl-2.0
|
slint/zenodo
|
zenodo/modules/records/serializers/pidrelations.py
|
2
|
4107
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Serializers."""
from __future__ import absolute_import, print_function
from invenio_pidrelations.contrib.versioning import PIDVersioning
from invenio_pidstore.models import PersistentIdentifier
from zenodo.modules.records.api import ZenodoRecord
def serialize_related_identifiers(pid):
"""Serialize PID Versioning relations as related_identifiers metadata."""
pv = PIDVersioning(child=pid)
related_identifiers = []
if pv.exists:
rec = ZenodoRecord.get_record(pid.get_assigned_object())
# External DOI records don't have Concept DOI
if 'conceptdoi' in rec:
ri = {
'scheme': 'doi',
'relation': 'isVersionOf',
'identifier': rec['conceptdoi']
}
related_identifiers.append(ri)
# TODO: We do not serialize previous/next versions to
# related identifiers because of the semantic-versioning cases
# (e.g. GitHub releases of minor versions)
#
# children = pv.children.all()
# idx = children.index(pid)
# left = children[:idx]
# right = children[idx + 1:]
# for p in left:
# rec = ZenodoRecord.get_record(p.get_assigned_object())
# ri = {
# 'scheme': 'doi',
# 'relation': 'isNewVersionOf',
# 'identifier': rec['doi']
# }
# related_identifiers.append(ri)
# for p in right:
# rec = ZenodoRecord.get_record(p.get_assigned_object())
# ri = {
# 'scheme': 'doi',
# 'relation': 'isPreviousVersionOf',
# 'identifier': rec['doi']
# }
# related_identifiers.append(ri)
pv = PIDVersioning(parent=pid)
if pv.exists:
for p in pv.children:
rec = ZenodoRecord.get_record(p.get_assigned_object())
ri = {
'scheme': 'doi',
'relation': 'hasVersion',
'identifier': rec['doi']
}
related_identifiers.append(ri)
return related_identifiers
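# Illustrative result shapes (hypothetical DOIs under the 10.5072 test prefix):
# for a regular version record this returns roughly
#     [{'scheme': 'doi', 'relation': 'isVersionOf', 'identifier': '10.5072/zenodo.1234'}]
# while for a concept (parent) record it returns one 'hasVersion' entry per child.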
def preprocess_related_identifiers(pid, record, result):
"""Preprocess related identifiers for record serialization.
Resolves the passed pid to the proper `recid` in order to add related
identifiers from PID relations.
"""
recid_value = record.get('recid')
if pid.pid_type == 'doi' and pid.pid_value == record.get('conceptdoi'):
recid_value = record.get('conceptrecid')
result['metadata']['doi'] = record.get('conceptdoi')
recid = (pid if pid.pid_value == recid_value else
PersistentIdentifier.get(pid_type='recid', pid_value=recid_value))
if recid.pid_value == record.get('conceptrecid'):
pv = PIDVersioning(parent=recid)
else:
pv = PIDVersioning(child=recid)
# Serialize PID versioning as related identifiers
if pv.exists:
rels = serialize_related_identifiers(recid)
if rels:
result['metadata'].setdefault(
'related_identifiers', []).extend(rels)
return result
|
gpl-2.0
|
gcd0318/django
|
django/conf/project_template/project_name/settings.py
|
271
|
3288
|
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
|
bsd-3-clause
|
kosz85/django
|
tests/forms_tests/widget_tests/test_passwordinput.py
|
247
|
1052
|
from django.forms import PasswordInput
from .base import WidgetTest
class PasswordInputTest(WidgetTest):
widget = PasswordInput()
def test_render(self):
self.check_html(self.widget, 'password', '', html='<input type="password" name="password" />')
def test_render_ignore_value(self):
self.check_html(self.widget, 'password', 'secret', html='<input type="password" name="password" />')
def test_render_value_true(self):
"""
The render_value argument lets you specify whether the widget should
render its value. For security reasons, this is off by default.
"""
widget = PasswordInput(render_value=True)
self.check_html(widget, 'password', '', html='<input type="password" name="password" />')
self.check_html(widget, 'password', None, html='<input type="password" name="password" />')
self.check_html(
widget, 'password', '[email protected]',
html='<input type="password" name="password" value="[email protected]" />',
)
|
bsd-3-clause
|
sivel/ansible
|
test/units/executor/test_task_queue_manager_callbacks.py
|
68
|
4502
|
# (c) 2016, Steve Kuznetsov <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
from units.compat import unittest
from units.compat.mock import MagicMock
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.plugins.callback import CallbackBase
from ansible.utils import context_objects as co
__metaclass__ = type
class TestTaskQueueManagerCallbacks(unittest.TestCase):
def setUp(self):
inventory = MagicMock()
variable_manager = MagicMock()
loader = MagicMock()
passwords = []
# Reset the stored command line args
co.GlobalCLIArgs._Singleton__instance = None
self._tqm = TaskQueueManager(inventory, variable_manager, loader, passwords)
self._playbook = Playbook(loader)
        # we use a MagicMock to register the result of the call we
        # expect to `v2_playbook_on_start`. We don't mock out the
# method since we're testing code that uses `inspect` to
# look at that method's argspec and we want to ensure this
# test is easy to reason about.
self._register = MagicMock()
def tearDown(self):
# Reset the stored command line args
co.GlobalCLIArgs._Singleton__instance = None
def test_task_queue_manager_callbacks_v2_playbook_on_start(self):
"""
Assert that no exceptions are raised when sending a Playbook
start callback to a current callback module plugin.
"""
register = self._register
class CallbackModule(CallbackBase):
"""
This is a callback module with the current
method signature for `v2_playbook_on_start`.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'current_module'
def v2_playbook_on_start(self, playbook):
register(self, playbook)
callback_module = CallbackModule()
self._tqm._callback_plugins.append(callback_module)
self._tqm.send_callback('v2_playbook_on_start', self._playbook)
register.assert_called_once_with(callback_module, self._playbook)
def test_task_queue_manager_callbacks_v2_playbook_on_start_wrapped(self):
"""
Assert that no exceptions are raised when sending a Playbook
start callback to a wrapped current callback module plugin.
"""
register = self._register
def wrap_callback(func):
"""
This wrapper changes the exposed argument
names for a method from the original names
to (*args, **kwargs). This is used in order
to validate that wrappers which change par-
ameter names do not break the TQM callback
system.
:param func: function to decorate
:return: decorated function
"""
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class WrappedCallbackModule(CallbackBase):
"""
This is a callback module with the current
method signature for `v2_playbook_on_start`
wrapped in order to change the signature.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'current_module'
@wrap_callback
def v2_playbook_on_start(self, playbook):
register(self, playbook)
callback_module = WrappedCallbackModule()
self._tqm._callback_plugins.append(callback_module)
self._tqm.send_callback('v2_playbook_on_start', self._playbook)
register.assert_called_once_with(callback_module, self._playbook)
|
gpl-3.0
|
seandavi/pelican-plugins
|
pelican_comment_system/pelican_comment_system.py
|
32
|
6416
|
# -*- coding: utf-8 -*-
"""
Pelican Comment System
======================
A Pelican plugin, which allows you to add comments to your articles.
Author: Bernhard Scheirle
"""
from __future__ import unicode_literals
import logging
import os
import copy
logger = logging.getLogger(__name__)
from itertools import chain
from pelican import signals
from pelican.readers import Readers
from pelican.writers import Writer
from . comment import Comment
from . import avatars
_all_comments = []
def setdefault(pelican, settings):
from pelican.settings import DEFAULT_CONFIG
for key, value in settings:
DEFAULT_CONFIG.setdefault(key, value)
if not pelican:
return
for key, value in settings:
pelican.settings.setdefault(key, value)
def pelican_initialized(pelican):
from pelican.settings import DEFAULT_CONFIG
settings = [
('PELICAN_COMMENT_SYSTEM', False),
('PELICAN_COMMENT_SYSTEM_DIR', 'comments'),
('PELICAN_COMMENT_SYSTEM_IDENTICON_OUTPUT_PATH', 'images/identicon'),
('PELICAN_COMMENT_SYSTEM_IDENTICON_DATA', ()),
('PELICAN_COMMENT_SYSTEM_IDENTICON_SIZE', 72),
('PELICAN_COMMENT_SYSTEM_AUTHORS', {}),
('PELICAN_COMMENT_SYSTEM_FEED', os.path.join('feeds', 'comment.%s.atom.xml')),
('PELICAN_COMMENT_SYSTEM_FEED_ALL', os.path.join('feeds', 'comments.all.atom.xml')),
('COMMENT_URL', '#comment-{slug}')
]
setdefault(pelican, settings)
DEFAULT_CONFIG['PAGE_EXCLUDES'].append(
DEFAULT_CONFIG['PELICAN_COMMENT_SYSTEM_DIR'])
DEFAULT_CONFIG['ARTICLE_EXCLUDES'].append(
DEFAULT_CONFIG['PELICAN_COMMENT_SYSTEM_DIR'])
if pelican:
pelican.settings['PAGE_EXCLUDES'].append(
pelican.settings['PELICAN_COMMENT_SYSTEM_DIR'])
pelican.settings['ARTICLE_EXCLUDES'].append(
pelican.settings['PELICAN_COMMENT_SYSTEM_DIR'])
def initialize(article_generator):
avatars.init(
article_generator.settings['OUTPUT_PATH'],
article_generator.settings[
'PELICAN_COMMENT_SYSTEM_IDENTICON_OUTPUT_PATH'],
article_generator.settings['PELICAN_COMMENT_SYSTEM_IDENTICON_DATA'],
article_generator.settings[
'PELICAN_COMMENT_SYSTEM_IDENTICON_SIZE'] / 3,
article_generator.settings['PELICAN_COMMENT_SYSTEM_AUTHORS'],
)
def warn_on_slug_collision(items):
slugs = {}
for comment in items:
if not comment.slug in slugs:
slugs[comment.slug] = [comment]
else:
slugs[comment.slug].append(comment)
for slug, itemList in slugs.items():
len_ = len(itemList)
if len_ > 1:
logger.warning('There are %s comments with the same slug: %s', len_, slug)
for x in itemList:
logger.warning(' %s', x.source_path)
def write_feed_all(gen, writer):
if gen.settings['PELICAN_COMMENT_SYSTEM'] is not True:
return
if gen.settings['PELICAN_COMMENT_SYSTEM_FEED_ALL'] is None:
return
context = copy.copy(gen.context)
context['SITENAME'] += " - All Comments"
context['SITESUBTITLE'] = ""
path = gen.settings['PELICAN_COMMENT_SYSTEM_FEED_ALL']
global _all_comments
_all_comments = sorted(_all_comments)
_all_comments.reverse()
for com in _all_comments:
com.title = com.article.title + " - " + com.title
com.override_url = com.article.url + com.url
writer = Writer(gen.output_path, settings=gen.settings)
writer.write_feed(_all_comments, context, path)
def write_feed(gen, items, context, slug):
if gen.settings['PELICAN_COMMENT_SYSTEM_FEED'] is None:
return
path = gen.settings['PELICAN_COMMENT_SYSTEM_FEED'] % slug
writer = Writer(gen.output_path, settings=gen.settings)
writer.write_feed(items, context, path)
def add_static_comments(gen, content):
if gen.settings['PELICAN_COMMENT_SYSTEM'] is not True:
return
global _all_comments
content.comments_count = 0
content.comments = []
# Modify the local context, so we get proper values for the feed
context = copy.copy(gen.context)
context['SITEURL'] += "/" + content.url
context['SITENAME'] += " - Comments: " + content.title
context['SITESUBTITLE'] = ""
folder = os.path.join(
gen.settings['PATH'],
gen.settings['PELICAN_COMMENT_SYSTEM_DIR'],
content.slug
)
if not os.path.isdir(folder):
logger.debug("No comments found for: %s", content.slug)
write_feed(gen, [], context, content.slug)
return
reader = Readers(gen.settings)
comments = []
replies = []
for file in os.listdir(folder):
name, extension = os.path.splitext(file)
if extension[1:].lower() in reader.extensions:
com = reader.read_file(
base_path=folder, path=file,
content_class=Comment, context=context)
com.article = content
_all_comments.append(com)
if hasattr(com, 'replyto'):
replies.append(com)
else:
comments.append(com)
feed_items = sorted(comments + replies)
feed_items.reverse()
warn_on_slug_collision(feed_items)
write_feed(gen, feed_items, context, content.slug)
# TODO: Fix this O(n²) loop
for reply in replies:
for comment in chain(comments, replies):
if comment.slug == reply.replyto:
comment.addReply(reply)
count = 0
for comment in comments:
comment.sortReplies()
count += comment.countReplies()
comments = sorted(comments)
content.comments_count = len(comments) + count
content.comments = comments
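# Hypothetical O(n) alternative to the quadratic reply-matching loop above
# (a sketch only, not wired into the plugin): index the comments by slug once,
# then resolve each reply's parent with a dict lookup. Colliding slugs, which
# warn_on_slug_collision already reports, would keep only the last comment.
def _attach_replies(comments, replies):
    by_slug = {c.slug: c for c in chain(comments, replies)}
    for reply in replies:
        parent = by_slug.get(reply.replyto)
        if parent is not None:
            parent.addReply(reply)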
def writeIdenticonsToDisk(gen, writer):
avatars.generateAndSaveMissingAvatars()
def pelican_finalized(pelican):
if pelican.settings['PELICAN_COMMENT_SYSTEM'] is not True:
return
global _all_comments
print('Processed %s comment(s)' % len(_all_comments))
_all_comments = []
def register():
signals.initialized.connect(pelican_initialized)
signals.article_generator_init.connect(initialize)
signals.article_generator_write_article.connect(add_static_comments)
signals.article_writer_finalized.connect(writeIdenticonsToDisk)
signals.article_writer_finalized.connect(write_feed_all)
signals.finalized.connect(pelican_finalized)
|
agpl-3.0
|
nathanaevitas/odoo
|
openerp/addons/hr_payroll_account/wizard/__init__.py
|
433
|
1116
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
tsteward/the-blue-alliance
|
tests/test_datafeed_controller.py
|
3
|
1879
|
import unittest2
import datetime
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from google.appengine.ext.webapp import Response
# from controllers.datafeed_controller import UsfirstEventDetailsGet
from models.event import Event
from models.team import Team
class TestUsfirstEventDetailsGet(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_urlfetch_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
def tearDown(self):
self.testbed.deactivate()
# def test_get(self):
# test with 2011ct
# usfirsteventget = UsfirstEventDetailsGet()
# usfirsteventget.response = Response()
# usfirsteventget.get(2011, "5561")
#
# check event object got created
# event = Event.get_by_id("2011ct")
# self.assertEqual(event.name, "Northeast Utilities FIRST Connecticut Regional")
# self.assertEqual(event.event_type, "Regional")
# self.assertEqual(event.start_date, datetime.datetime(2011, 3, 31, 0, 0))
# self.assertEqual(event.end_date, datetime.datetime(2011, 4, 2, 0, 0))
# self.assertEqual(event.year, 2011)
# self.assertEqual(event.venue_address, "Connecticut Convention Center\r\n100 Columbus Blvd\r\nHartford, CT 06103\r\nUSA")
# self.assertEqual(event.website, "http://www.ctfirst.org/ctr")
# self.assertEqual(event.event_short, "ct")
#
# check team objects get created for missing teams
# frc177 = Team.get_by_id("frc177")
# self.assertEqual(frc177.team_number, 177)
# self.assertEqual(frc177.first_tpid, 41633)
|
mit
|
hectord/lettuce
|
tests/integration/lib/Django-1.3/django/contrib/gis/db/backends/postgis/adapter.py
|
311
|
1165
|
"""
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
class PostGISAdapter(object):
def __init__(self, geom):
"Initializes on the geometry."
# Getting the WKB (in string form, to allow easy pickling of
# the adaptor) and the SRID from the geometry.
self.ewkb = str(geom.ewkb)
self.srid = geom.srid
def __conform__(self, proto):
# Does the given protocol conform to what Psycopg2 expects?
if proto == ISQLQuote:
return self
else:
raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
def __eq__(self, other):
return (self.ewkb == other.ewkb) and (self.srid == other.srid)
def __str__(self):
return self.getquoted()
def getquoted(self):
"Returns a properly quoted string for use in PostgreSQL/PostGIS."
# Want to use WKB, so wrap with psycopg2 Binary() to quote properly.
return 'ST_GeomFromEWKB(E%s)' % Binary(self.ewkb)
def prepare_database_save(self, unused):
return self
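# Illustrative only (assumes psycopg2 and a GEOS geometry `geom` are available):
#     PostGISAdapter(geom).getquoted()  # -> "ST_GeomFromEWKB(E'...')"
# i.e. the EWKB bytes quoted via psycopg2's Binary, ready to embed in PostGIS SQL.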
|
gpl-3.0
|
akulakov/mangotrac
|
proj_issues/issues/views.py
|
1
|
16649
|
# Imports {{{
from __future__ import print_function, unicode_literals, division
from pprint import pprint
from difflib import Differ
from django.http import HttpResponse
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.admin.views.decorators import staff_member_required
from django.forms import forms
from django.core.mail import send_mail
from django.template.defaultfilters import date
from django.contrib.auth.decorators import permission_required
from django.utils.decorators import method_decorator
from django.forms.formsets import formset_factory, BaseFormSet, all_valid
from django.forms.models import modelformset_factory
from django.db.models import Q
from shared.utils import *
from issues.models import *
from issues.forms import *
from mcbv.edit import CreateView, UpdateView, FormSetView, ModelFormSetView
from mcbv.base import TemplateView
from mcbv.detail import DetailView
from mcbv.list_custom import DetailListCreateView, ListView
# }}}
def context_processor(request):
return dict(app_name="MangoTrac")
# add_issue delete_issue
@staff_member_required
def update_issue(request, pk, mode=None, action=None):
""" AJAX view, toggle Closed on/off, set progress or delete an issue.
closed toggle logic:
done/won't fix => open
any other value => done
"""
issue = Issue.obj.get(pk=pk)
open_code = settings.SPECIAL_STATUS_CODES["open"]
done_code = settings.SPECIAL_STATUS_CODES["done"]
s_open = Status.obj.filter(status=open_code).first()
s_done = Status.obj.filter(status=done_code).first()
if mode == "delete":
issue.delete()
return redir("admin:issues_issue_changelist")
else:
if mode == "progress":
val = int(action)
setattr(issue, mode, val)
elif mode == "closed":
mode = "status"
if action == "on":
val = s_done
status = "closed"
else:
val = s_open
status = "opened"
# title = "Issue %s %s" % (issue, status)
msg_tpl = "Issue '%s' was " + status + " <%s%s>\n\n%s"
NotificationMixin().send_notification(issue, msg_tpl, make_diff=False, show_descr=False, request=request)
setattr(issue, mode, val)
issue.save()
return HttpResponse('')
@staff_member_required
def delete_comment(request, pk):
Comment.obj.get(pk=pk).delete()
return redir(referer(request))
class NotificationMixin:
def diff(self, oldobj, obj):
"""Create a diff of `obj` vs. `oldobj`; description is handled using difflib module."""
difflist = []
skip = "description_html".split()
nl = '\n'
for fld in obj._meta.fields:
name = fld.name
if name not in skip:
oldval = getattr(oldobj, fld.name)
val = getattr(obj, fld.name)
if name == "description":
olddesc = oldobj.description.splitlines(1)
desc = obj.description.splitlines(1)
if olddesc:
olddesc[-1] = olddesc[-1].strip() + '\r\n'
if desc:
desc[-1] = desc[-1].strip() + '\r\n'
d = Differ()
result = list(d.compare(olddesc, desc))
# note: Differ returns full(?) content when there are no changes!!!?
if olddesc != desc:
difflist.extend( [nl + "Description diff:" + nl] + result + [nl] )
else:
if oldval != val:
difflist.append("%s: changed from '%s' to '%s'" % (fld.name, oldval, val) + nl)
diff = ''.join(difflist)
return diff
def send_notification(self, obj, msg_tpl, comment_body='', show_descr=True, make_diff=True, request=None):
""" Send notification to creator / new|old owner on issue change.
For description, show a diff; for other fields, show what it changed from / to.
"""
request = request or self.request
oldobj = Issue.obj.get(pk=obj.pk) if make_diff else None
if comment_body:
body = comment_body
elif oldobj:
body = self.diff(oldobj, obj)
elif show_descr:
body = obj.description
else:
body = ''
# from_ = "[email protected]"
old_owner = Issue.obj.get(pk=obj.pk).owner # if owner changed, we need to notify him
from_ = settings.DEFAULT_FROM_EMAIL
serv_root = request.META["HTTP_ORIGIN"]
url = reverse2("issue", dpk=obj.pk)
values = [obj.title, serv_root, url, body]
msg = msg_tpl % tuple(values)
send_to = set()
title = "%s (%s) #%s: %s" % (old_owner, obj.status, obj.pk, obj.title)
send_to.add(old_owner)
send_to.add(obj.owner)
send_to.add(obj.creator)
if settings.TEST_NOTIFY:
send_to = [u.email for u in send_to if u] # use for testing
else:
send_to = [u.email for u in send_to if u and u!=request.user]
if obj.cc:
send_to.extend(obj.cc.split())
send_mail(title, msg, from_, send_to, fail_silently=False)
class ReportList(ListView):
list_model = Report
template_name = "reports.html"
class CreateReport(CreateView):
form_model = Report
modelform_class = ReportForm
template_name = "report_form.html"
def modelform_valid(self, modelform):
resp = super(CreateReport, self).modelform_valid(modelform)
self.modelform_object.update(creator=self.request.user)
return resp
class UpdateReport(UpdateView):
form_model = Report
modelform_class = ReportForm
template_name = "report_form.html"
class DuplicateReport(DetailView):
detail_model = Report
def get(self, request, *args, **kwargs):
report = self.get_detail_object()
report.pk = None
report.name += " copy"
report.save()
return redir("update_report", report.pk)
class IssuesMixin(object):
def add_context(self):
return dict(bold_labels=settings.BOLD_LABELS)
def get_success_url(self):
"""Return to view issue page on success."""
# return reverse("admin:issues_issue_changelist") + "?status__id__exact=1&o=5.-3"
return reverse2("issue", self.modelform_object.pk)
class UpdateIssue(IssuesMixin, UpdateView, NotificationMixin):
form_model = Issue
modelform_class = IssueForm
msg_tpl = "Issue '%s' was updated <%s%s>\n\n%s"
template_name = "issue_form.html"
def modelform_invalid(self, modelform):
preview = None
post = self.request.POST
if "preview" in post:
preview = markdown(post["description"])
return self.get_context_data(modelform=modelform, preview=preview)
def modelform_valid(self, modelform):
""" If form was changed, send notification email the (new) issue owner.
Note: at the start of the function, FK relationships are already updated in `self.object`.
"""
if modelform.has_changed():
self.send_notification(self.modelform_object, self.msg_tpl)
return super(UpdateIssue, self).modelform_valid(modelform)
class CreateIssue(IssuesMixin, CreateView, NotificationMixin):
form_model = Issue
modelform_class = IssueForm
msg_tpl = "Issue '%s' was created <%s%s>\n\n%s"
template_name = "issue_form.html"
def modelform_invalid(self, modelform):
preview = None
post = self.request.POST
if "preview" in post:
preview = markdown(post["description"])
return self.get_context_data(modelform=modelform, preview=preview)
def modelform_valid(self, modelform):
resp = super(CreateIssue, self).modelform_valid(modelform)
self.modelform_object.update(creator=self.request.user)
self.send_notification(self.modelform_object, self.msg_tpl, make_diff=False)
return resp
class UpdateComment(UpdateView):
form_model = Comment
modelform_class = CommentForm
template_name = "issues/comment_form.html"
def get_success_url(self):
return self.modelform_object.issue.get_absolute_url()
class ViewIssue(DetailListCreateView, NotificationMixin):
""" View issue, comments and new comment form.
When new comment is submitted, issue status / owner may also be updated.
"""
detail_model = Issue
list_model = Comment
modelform_class = CommentForm
related_name = "comments"
fk_attr = "issue"
msg_tpl = "Comment was added to the Issue '%s' <%s%s>\n\n%s"
template_name = "issue.html"
def modelform_get(self, request, *args, **kwargs):
"""Get issue modelform with two fields: owner and status; return both comment & issue modelforms."""
modelform2 = OwnerStatusForm(instance=self.detail_object)
return self.get_modelform_context_data( modelform=self.get_modelform(), modelform2=modelform2 )
def add_context(self):
"""List of fields to display at the top of issue."""
fields = "status owner cc project priority_code difficulty type version tags creator created updated".split()
return dict(fields=fields)
def modelform2_valid(self, modelform):
"""Update issue based on the small form with 2 fields."""
if modelform.has_changed():
issue = modelform.save(commit=False)
self.send_notification(issue, UpdateIssue.msg_tpl)
issue.save()
def modelform_valid(self, modelform):
"""Add a comment; send notification email to the issue owner."""
if modelform.has_changed():
resp = super(ViewIssue, self).modelform_valid(modelform)
obj = self.modelform_object
obj.update(creator=self.user)
self.send_notification(obj.issue, self.msg_tpl, comment_body=obj.description)
self.modelform2_valid( OwnerStatusForm(instance=self.detail_object, data=self.request.POST) )
return redir(self.detail_object.get_absolute_url())
class AddIssues(IssuesMixin, FormSetView, NotificationMixin):
"""Create new issues."""
formset_model = Issue
formset_form_class = IssueForm
msg_tpl = "New Issue '%s' was created <%s%s>\n\n%s"
extra = 5
template_name = "add_issues.html"
def get_success_url(self):
# can't redir to issue page because -- multiple issues
return reverse("admin:issues_issue_changelist") + "?status__id__exact=1&o=5.-3"
def process_form(self, form):
issue = form.save(commit=False)
issue.update(creator=self.request.user)
self.send_notification(issue, self.msg_tpl, make_diff=False)
class AttachmentsView(ModelFormSetView, DetailView):
"""Create new issues."""
detail_model = Issue
formset_model = Attachment
formset_form_class = AttachmentForm
msg_tpl = "New attachments '%s' were added <%s%s>\n\n%s"
can_delete = True
extra = 15
template_name = "attachments.html"
def get_success_url(self):
return self.detail_object.get_absolute_url()
def process_form(self, form):
file = form.save(commit=False)
file.creator = self.request.user
file.issue = self.detail_object
file.save()
def formset_valid(self, formset):
"""Handle deletion of attachments."""
for form in formset:
if form.cleaned_data.get("file"):
if form.cleaned_data.get("DELETE"):
form.instance.delete()
else:
self.process_form(form)
return HttpResponseRedirect(self.get_success_url())
class ReportView(DetailView):
detail_model = Report
template_name = "report.html"
def resolve_filter_relations(self, arg_filters, kw_filters):
""" Resolve 1to1 or MtoM filter relations (also add __in and split list of values)
Example:
priority_code = 1, 2 ==>
priority_code__priority__in=(1,2)
"""
relation_filters = dict(
owner = (User, "username"),
status = (Status, "status"),
priority_code = (Priority, "priority"),
project = (Project, "project"),
type = (Type, "type"),
version = (Version, "version"),
tags = (Tag, "tag"),
)
for flt, vals in kw_filters.items():
vals = [v.strip() for v in vals.split(',')]
if flt in relation_filters:
cls, fldname = relation_filters[flt]
kw_filters["%s__%s__in" % (flt, fldname)] = vals
del kw_filters[flt]
else:
if len(vals) > 1:
kw_filters["%s__in" % flt] = vals
del kw_filters[flt]
else:
kw_filters[flt] = vals[0]
def add_context(self):
""" Create grouped and filtered rows of issues based on GET args.
Grouped columns are moved to the left side.
e.g. ?group=owner.project & closed=0 & priority__gt=0
=> group by owner, project; filter out closed and 0 priority issues
"""
group_by = ()
filters = {}
report = self.detail_object
# by default, use all cols
cols = "title owner status priority_code difficulty project type version created progress tags".split()
# get groups and filters
group_by = [l.strip() for l in report.group_by.splitlines() if l.strip()]
sort_by = [l.strip() for l in report.sort_by.splitlines() if l.strip()]
columns = [l.strip() for l in report.columns.splitlines() if l.strip()]
columns = columns or cols
arg_filters = []
kw_filters = dict(
[(k.strip(), v.strip()) for k, v in
[l.split('=', 1) for l in report.filters.splitlines()
if '=' in l]
])
self.resolve_filter_relations(arg_filters, kw_filters)
# move to front (or insert) group by columns
issues = Issue.obj.all().filter(*arg_filters, **kw_filters)
group_by_names = [x.strip('-') for x in group_by] # remove order reversal char
for n in reversed(group_by_names):
if n in columns:
columns.remove(n)
columns.insert(0, n)
# make table rows
issues = issues.order_by( *(group_by + sort_by) )
rows = []
last_row = None
for issue in issues:
row = []
ref_row = [] # reference row, includes omitted values
# when new group starts, subsequent columns need to show the value even if it hasn't changed
reset_group = False
# make row
for n, col in enumerate(columns):
border = col not in group_by_names # no border for groups to make them stand out visually
val = use_val = getattr(issue, col)
if hasattr(val, "all"):
val = use_val = sjoin(val.all(), ', ')
if last_row and col in group_by_names:
last = last_row[n]
# see note above about reset_group
if val != last:
use_val = val
reset_group = True
elif not reset_group:
use_val = ''
if col in ("type", "version") and use_val is None:
use_val = ''
if col == "title":
use_val = "<a href='%s'>%s</a>" % (reverse2("issue", issue.pk), use_val)
if col=="created" or col=="updated":
use_val = date(use_val, "DATETIME_FORMAT")
if col == "description":
use_val = issue.description_html
row.append((use_val, border))
ref_row.append(val)
last_row = ref_row
rows.append(row)
headers = [Issue._meta.get_field(c).verbose_name for c in columns]
return dict(headers=headers, rows=rows)
|
mit
|
GitHublong/hue
|
desktop/core/ext-py/MySQL-python-1.2.5/MySQLdb/connections.py
|
76
|
11777
|
"""
This module implements connections for MySQLdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from MySQLdb import cursors
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
import types, _mysql
import re
def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
"""
If cursor is not None, (errorclass, errorvalue) is appended to
cursor.messages; otherwise it is appended to
connection.messages. Then errorclass is raised with errorvalue as
the value.
You can override this with your own error handler by assigning it
to the instance.
"""
error = errorclass, errorvalue
if cursor:
cursor.messages.append(error)
else:
connection.messages.append(error)
del cursor
del connection
raise errorclass, errorvalue
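# Illustrative override (not part of the original module): per the docstring
# above, a handler like this can be assigned to a connection or cursor
# instance, e.g. conn.errorhandler = logging_errorhandler.
def logging_errorhandler(connection, cursor, errorclass, errorvalue):
    """Example handler: record the error like the default one, then re-raise."""
    (cursor.messages if cursor else connection.messages).append((errorclass, errorvalue))
    raise errorclass, errorvalue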
re_numeric_part = re.compile(r"^(\d+)")
def numeric_part(s):
"""Returns the leading numeric part of a string.
>>> numeric_part("20-alpha")
20
>>> numeric_part("foo")
>>> numeric_part("16b")
16
"""
m = re_numeric_part.match(s)
if m:
return int(m.group(1))
return None
class Connection(_mysql.connection):
"""MySQL Database Connection Object"""
default_cursor = cursors.Cursor
def __init__(self, *args, **kwargs):
"""
Create a connection to the database. It is strongly recommended
that you only use keyword parameters. Consult the MySQL C API
documentation for more information.
host
string, host to connect
user
string, user to connect as
passwd
string, password to use
db
string, database to use
port
integer, TCP/IP port to connect to
unix_socket
string, location of unix_socket to use
conv
conversion dictionary, see MySQLdb.converters
connect_timeout
number of seconds to wait before the connection attempt
fails.
compress
if set, compression is enabled
named_pipe
if set, a named pipe is used to connect (Windows only)
init_command
command which is run once the connection is created
read_default_file
file from which default client values are read
read_default_group
configuration group to use from the default file
cursorclass
class object, used to create cursors (keyword only)
use_unicode
If True, text-like columns are returned as unicode objects
using the connection's character set. Otherwise, text-like
            columns are returned as normal strings. Unicode objects
            will always be encoded to
the connection's character set regardless of this setting.
charset
If supplied, the connection character set will be changed
to this character set (MySQL-4.1 and newer). This implies
use_unicode=True.
sql_mode
If supplied, the session SQL mode will be changed to this
setting (MySQL-4.1 and newer). For more details and legal
values, see the MySQL documentation.
client_flag
integer, flags to use or 0
(see MySQL docs or constants/CLIENTS.py)
ssl
dictionary or mapping, contains SSL connection parameters;
see the MySQL documentation for more details
(mysql_ssl_set()). If this is set, and the client does not
support SSL, NotSupportedError will be raised.
local_infile
integer, non-zero enables LOAD LOCAL INFILE; zero disables
autocommit
If False (default), autocommit is disabled.
If True, autocommit is enabled.
If None, autocommit isn't set and server default is used.
There are a number of undocumented, non-standard methods. See the
documentation for the MySQL C API for some hints on what they do.
"""
from MySQLdb.constants import CLIENT, FIELD_TYPE
from MySQLdb.converters import conversions
from weakref import proxy
kwargs2 = kwargs.copy()
if 'conv' in kwargs:
conv = kwargs['conv']
else:
conv = conversions
conv2 = {}
for k, v in conv.items():
if isinstance(k, int) and isinstance(v, list):
conv2[k] = v[:]
else:
conv2[k] = v
kwargs2['conv'] = conv2
cursorclass = kwargs2.pop('cursorclass', self.default_cursor)
charset = kwargs2.pop('charset', '')
if charset:
use_unicode = True
else:
use_unicode = False
use_unicode = kwargs2.pop('use_unicode', use_unicode)
sql_mode = kwargs2.pop('sql_mode', '')
client_flag = kwargs.get('client_flag', 0)
client_version = tuple([ numeric_part(n) for n in _mysql.get_client_info().split('.')[:2] ])
if client_version >= (4, 1):
client_flag |= CLIENT.MULTI_STATEMENTS
if client_version >= (5, 0):
client_flag |= CLIENT.MULTI_RESULTS
kwargs2['client_flag'] = client_flag
# PEP-249 requires autocommit to be initially off
autocommit = kwargs2.pop('autocommit', False)
super(Connection, self).__init__(*args, **kwargs2)
self.cursorclass = cursorclass
self.encoders = dict([ (k, v) for k, v in conv.items()
if type(k) is not int ])
self._server_version = tuple([ numeric_part(n) for n in self.get_server_info().split('.')[:2] ])
db = proxy(self)
def _get_string_literal():
def string_literal(obj, dummy=None):
return db.string_literal(obj)
return string_literal
def _get_unicode_literal():
def unicode_literal(u, dummy=None):
return db.literal(u.encode(unicode_literal.charset))
return unicode_literal
def _get_string_decoder():
def string_decoder(s):
return s.decode(string_decoder.charset)
return string_decoder
string_literal = _get_string_literal()
self.unicode_literal = unicode_literal = _get_unicode_literal()
self.string_decoder = string_decoder = _get_string_decoder()
if not charset:
charset = self.character_set_name()
self.set_character_set(charset)
if sql_mode:
self.set_sql_mode(sql_mode)
if use_unicode:
self.converter[FIELD_TYPE.STRING].append((None, string_decoder))
self.converter[FIELD_TYPE.VAR_STRING].append((None, string_decoder))
self.converter[FIELD_TYPE.VARCHAR].append((None, string_decoder))
self.converter[FIELD_TYPE.BLOB].append((None, string_decoder))
self.encoders[types.StringType] = string_literal
self.encoders[types.UnicodeType] = unicode_literal
self._transactional = self.server_capabilities & CLIENT.TRANSACTIONS
if self._transactional:
if autocommit is not None:
self.autocommit(autocommit)
self.messages = []
def autocommit(self, on):
on = bool(on)
if self.get_autocommit() != on:
_mysql.connection.autocommit(self, on)
def cursor(self, cursorclass=None):
"""
Create a cursor on which queries may be performed. The
optional cursorclass parameter is used to create the
Cursor. By default, self.cursorclass=cursors.Cursor is
used.
"""
return (cursorclass or self.cursorclass)(self)
def __enter__(self):
if self.get_autocommit():
self.query("BEGIN")
return self.cursor()
def __exit__(self, exc, value, tb):
if exc:
self.rollback()
else:
self.commit()
def literal(self, o):
"""
If o is a single object, returns an SQL literal as a string.
If o is a non-string sequence, the items of the sequence are
converted and returned as a sequence.
Non-standard. For internal use; do not use this in your
applications.
"""
return self.escape(o, self.encoders)
def begin(self):
"""Explicitly begin a connection. Non-standard.
DEPRECATED: Will be removed in 1.3.
Use an SQL BEGIN statement instead."""
from warnings import warn
warn("begin() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
self.query("BEGIN")
if not hasattr(_mysql.connection, 'warning_count'):
def warning_count(self):
"""Return the number of warnings generated from the
last query. This is derived from the info() method."""
from string import atoi
info = self.info()
if info:
return atoi(info.split()[-1])
else:
return 0
def set_character_set(self, charset):
"""Set the connection character set to charset. The character
set can only be changed in MySQL-4.1 and newer. If you try
to change the character set from the current value in an
older version, NotSupportedError will be raised."""
if charset == "utf8mb4":
py_charset = "utf8"
else:
py_charset = charset
if self.character_set_name() != charset:
try:
super(Connection, self).set_character_set(charset)
except AttributeError:
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set charset")
self.query('SET NAMES %s' % charset)
self.store_result()
self.string_decoder.charset = py_charset
self.unicode_literal.charset = py_charset
def set_sql_mode(self, sql_mode):
"""Set the connection sql_mode. See MySQL documentation for
legal values."""
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set sql_mode")
self.query("SET SESSION sql_mode='%s'" % sql_mode)
self.store_result()
def show_warnings(self):
"""Return detailed information about warnings as a
sequence of tuples of (Level, Code, Message). This
is only supported in MySQL-4.1 and up. If your server
is an earlier version, an empty sequence is returned."""
if self._server_version < (4,1): return ()
self.query("SHOW WARNINGS")
r = self.store_result()
warnings = r.fetch_row(0)
return warnings
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
errorhandler = defaulterrorhandler
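# A minimal usage sketch for the Connection class above (not part of MySQLdb);
# the host, credentials, database name and query below are hypothetical.
def _example_connection_usage():
    import MySQLdb
    conn = MySQLdb.connect(host="localhost", user="app", passwd="secret",
                           db="test", charset="utf8")  # charset implies use_unicode=True
    try:
        cur = conn.cursor()            # built from Connection.cursorclass
        cur.execute("SELECT VERSION()")
        version = cur.fetchone()
    finally:
        conn.close()
    return version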
|
apache-2.0
|
h3biomed/ansible
|
lib/ansible/modules/cloud/amazon/ec2_eip_facts.py
|
11
|
4022
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eip_facts
short_description: List EC2 EIP details
description:
- List details of EC2 Elastic IP addresses.
version_added: "2.6"
author: "Brad Macpherson (@iiibrad)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and filter
value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options)
for possible filters. Filter names and values are case sensitive.
required: false
default: {}
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details or the AWS region,
# see the AWS Guide for details.
# List all EIP addresses in the current region.
- ec2_eip_facts:
register: regional_eip_addresses
# List all EIP addresses for a VM.
- ec2_eip_facts:
filters:
instance-id: i-123456789
register: my_vm_eips
- debug: msg="{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}"
# List all EIP addresses for several VMs.
- ec2_eip_facts:
filters:
instance-id:
- i-123456789
- i-987654321
register: my_vms_eips
# List all EIP addresses using the 'Name' tag as a filter.
- ec2_eip_facts:
filters:
tag:Name: www.example.com
register: my_vms_eips
# List all EIP addresses using the Allocation-id as a filter
- ec2_eip_facts:
filters:
allocation-id: eipalloc-64de1b01
register: my_vms_eips
# Set the variable eip_alloc to the value of the first allocation_id
# and set the variable my_pub_ip to the value of the first public_ip
- set_fact:
eip_alloc: "{{ my_vms_eips.addresses[0].allocation_id }}"
my_pub_ip: "{{ my_vms_eips.addresses[0].public_ip }}"
'''
RETURN = '''
addresses:
description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
returned: on success
type: list
sample: [{
"allocation_id": "eipalloc-64de1b01",
"association_id": "eipassoc-0fe9ce90d6e983e97",
"domain": "vpc",
"instance_id": "i-01020cfeb25b0c84f",
"network_interface_id": "eni-02fdeadfd4beef9323b",
"network_interface_owner_id": "0123456789",
"private_ip_address": "10.0.0.1",
"public_ip": "54.81.104.1",
"tags": {
"Name": "test-vm-54.81.104.1"
}
}]
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict)
try:
from botocore.exceptions import (BotoCoreError, ClientError)
except ImportError:
pass # caught by imported AnsibleAWSModule
def get_eips_details(module):
connection = module.client('ec2')
filters = module.params.get("filters")
try:
response = connection.describe_addresses(
Filters=ansible_dict_to_boto3_filter_list(filters)
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(
e,
msg="Error retrieving EIPs")
addresses = camel_dict_to_snake_dict(response)['addresses']
for address in addresses:
if 'tags' in address:
address['tags'] = boto3_tag_list_to_ansible_dict(address['tags'])
return addresses
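# A minimal sketch (not used by the module) of the Filters structure that
# describe_addresses() ultimately receives; the instance id is hypothetical.
def _example_boto3_filter_list():
    # a module-style filters dict such as {'instance-id': 'i-123456789'} is
    # expected to be translated into the boto3 list-of-dicts form below
    return [{'Name': 'instance-id', 'Values': ['i-123456789']}]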
def main():
module = AnsibleAWSModule(
argument_spec=dict(
filters=dict(type='dict', default={})
),
supports_check_mode=True
)
module.exit_json(changed=False, addresses=get_eips_details(module))
if __name__ == '__main__':
main()
|
gpl-3.0
|
curtacircuitos/pcb-tools
|
gerber/common.py
|
2
|
2148
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Hamilton Kibbe <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import rs274x
from . import excellon
from . import ipc356
from .exceptions import ParseError
from .utils import detect_file_format
def read(filename):
""" Read a gerber or excellon file and return a representative object.
Parameters
----------
filename : string
Filename of the file to read.
Returns
-------
file : CncFile subclass
CncFile object representing the file, either GerberFile, ExcellonFile,
or IPCNetlist. Returns None if file is not of the proper type.
"""
with open(filename, 'rU') as f:
data = f.read()
return loads(data, filename)
def loads(data, filename=None):
""" Read gerber or excellon file contents from a string and return a
representative object.
Parameters
----------
data : string
Source file contents as a string.
filename : string, optional
String containing the filename of the data source.
Returns
-------
file : CncFile subclass
CncFile object representing the data, either GerberFile, ExcellonFile,
or IPCNetlist. Returns None if data is not of the proper type.
"""
fmt = detect_file_format(data)
if fmt == 'rs274x':
return rs274x.loads(data, filename=filename)
elif fmt == 'excellon':
return excellon.loads(data, filename=filename)
elif fmt == 'ipc_d_356':
return ipc356.loads(data, filename=filename)
else:
raise ParseError('Unable to detect file format')
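# A minimal usage sketch for the helpers above; 'board.gbr' is a hypothetical
# filename assumed to contain RS-274X data.
def _example_usage():
    layer = read('board.gbr')  # dispatches on the detected file format
    with open('board.gbr', 'rU') as f:
        same_layer = loads(f.read(), filename='board.gbr')
    return layer, same_layer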
|
apache-2.0
|
lduarte1991/edx-platform
|
common/djangoapps/student/management/commands/change_enrollment.py
|
2
|
5479
|
""" Command line script to change user enrollments. """
import logging
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from student.models import CourseEnrollment, User
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RollbackException(Exception):
"""
Exception raised explicitly to cause a database transaction rollback.
"""
pass
class Command(BaseCommand):
help = """
Changes the enrollment status for students that meet
the criteria specified by the parameters to this command.
Example:
Change enrollment for users joe, frank, and bill from audit to honor:
$ ... change_enrollment -u joe,frank,bill -c some/course/id --from audit --to honor
Or
$ ... change_enrollment -e "[email protected],[email protected],..." -c some/course/id --from audit --to honor
See what would have been changed from audit to honor without making that change
$ ... change_enrollment -u joe,frank,bill -c some/course/id --from audit --to honor -n
"""
enrollment_modes = ('audit', 'verified', 'honor')
def add_arguments(self, parser):
parser.add_argument('-f', '--from',
metavar='FROM_MODE',
dest='from_mode',
required=True,
choices=self.enrollment_modes,
help='Move from this enrollment mode')
parser.add_argument('-t', '--to',
metavar='TO_MODE',
dest='to_mode',
required=True,
choices=self.enrollment_modes,
help='Move to this enrollment mode')
parser.add_argument('-u', '--username',
metavar='USERNAME',
help='Comma-separated list of usernames to move in the course')
parser.add_argument('-e', '--email',
metavar='EMAIL',
help='Comma-separated list of email addresses to move in the course')
parser.add_argument('-c', '--course',
metavar='COURSE_ID',
dest='course_id',
required=True,
help='Course id to use for transfer')
parser.add_argument('-n', '--noop',
action='store_true',
help='Display what will be done but do not actually do anything')
def handle(self, *args, **options):
try:
course_key = CourseKey.from_string(options['course_id'])
except InvalidKeyError:
raise CommandError('Invalid or non-existent course id {}'.format(options['course_id']))
if not options['username'] and not options['email']:
raise CommandError('You must include usernames (-u) or emails (-e) to select users to update')
enrollment_args = dict(
course_id=course_key,
mode=options['from_mode']
)
error_users = []
success_users = []
if options['username']:
self.update_enrollments('username', enrollment_args, options, error_users, success_users)
if options['email']:
self.update_enrollments('email', enrollment_args, options, error_users, success_users)
self.report(error_users, success_users)
def update_enrollments(self, identifier, enrollment_args, options, error_users, success_users):
""" Update enrollments for a specific user identifier (email or username). """
users = options[identifier].split(",")
for identified_user in users:
logger.info(identified_user)
try:
user_args = {
identifier: identified_user
}
enrollment_args['user'] = User.objects.get(**user_args)
enrollments = CourseEnrollment.objects.filter(**enrollment_args)
with transaction.atomic():
for enrollment in enrollments:
enrollment.update_enrollment(mode=options['to_mode'])
enrollment.save()
if options['noop']:
raise RollbackException('Forced rollback.')
except RollbackException:
success_users.append(identified_user)
continue
except Exception as exception: # pylint: disable=broad-except
error_users.append((identified_user, exception))
continue
success_users.append(identified_user)
logger.info('Updated user [%s] to mode [%s]', identified_user, options['to_mode'])
def report(self, error_users, success_users):
""" Log and overview of the results of the command. """
total_users = len(success_users) + len(error_users)
logger.info('Successfully updated %i out of %i users', len(success_users), total_users)
if len(error_users) > 0:
logger.info('The following %i user(s) were not saved:', len(error_users))
for user, error in error_users:
logger.info('user: [%s] reason: [%s] %s', user, type(error).__name__, error.message)
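# A hedged sketch of invoking this command programmatically; the course id and
# username are hypothetical, and depending on the Django version the required
# options may need to be supplied on the command line instead of as kwargs.
def _example_invocation():
    from django.core.management import call_command
    call_command('change_enrollment',
                 from_mode='audit', to_mode='honor',
                 username='joe', course_id='course-v1:edX+DemoX+Demo_Course',
                 noop=True)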
|
agpl-3.0
|
walkover/auto-tracking-cctv-gateway
|
gateway/mobile/server.py
|
1
|
4360
|
import json
import logging
import struct
from flask import Flask, request
from tornado import gen
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer
from tornado.wsgi import WSGIContainer
from gateway.app import gateway
from gateway.conf import (
MOBILE_NETWORK_IP,
MOBILE_NETWORK_HTTP_PORT,
MOBILE_NETWORK_TCP_PORT,
)
from gateway.firebase import fcm
flask = Flask(__name__)
@flask.route('/cameras', methods=['GET'])
def handle_camera_list_request():
cameras = gateway.camera_server.cameras()
if not cameras:
return json.dumps({
'success': False,
'reason': 'cameras are not initialized.'
})
else:
return json.dumps([camera.to_dict() for camera in cameras])
@flask.route('/camera/<int:camera_id>', methods=['GET'])
def handle_camera_request(camera_id):
camera = gateway.camera_server.camera(camera_id)
if not camera:
return json.dumps({
'success': False,
'reason': 'camera {} does not exist.'.format(camera_id)
})
else:
return json.dumps(camera.to_dict())
@flask.route('/camera/<int:camera_id>/move', methods=['POST'])
def handle_camera_move_request(camera_id):
body = request.data.decode('utf-8')
body = json.loads(body)
direction = body['direction']
camera = gateway.camera_server.camera(camera_id)
if not camera:
return json.dumps({
'success': False,
'reason': 'camera {} does not exist.'.format(camera_id)
})
else:
if not camera.auto_mode:
camera.move(direction)
return json.dumps({
'success': True
})
@flask.route('/camera/<int:camera_id>/mode', methods=['POST'])
def handle_camera_mode_request(camera_id):
body = request.data.decode('utf-8')
body = json.loads(body)
mode = body['mode']
camera = gateway.camera_server.camera(camera_id)
if not camera:
return json.dumps({
'success': False,
'reason': 'camera {} does not exist.'.format(camera_id)
})
else:
if mode == 'AUTO':
camera.auto_mode = True
else:
camera.auto_mode = False
return json.dumps({
'success': True
})
@flask.route('/token', methods=['POST'])
def handle_update_token():
body = request.data.decode('utf-8')
body = json.loads(body)
if 'token' in body:
token = body['token']
logging.debug('Received firebase token: %s', token)
fcm.insert_token(token)
return json.dumps({
'success': True
})
class MobileTCPServer(TCPServer):
def __init__(self, parent):
super(MobileTCPServer, self).__init__()
self.__parent = parent
@gen.coroutine
def handle_stream(self, stream, address):
logging.info('New mobile stream {} from {}'.format(stream, address))
camera_id = None
def on_close(data):
logging.info('Close mobile stream {}'.format(stream))
camera = gateway.camera_server.camera(camera_id)
if camera is not None:
camera.unsubscribe(stream)
def on_data(data):
# nonlocal lets on_close() above see the camera id read from the stream
nonlocal camera_id
logging.info('Read camera id from mobile stream {}'.format(stream))
camera_id = int(struct.unpack('!Q', data)[0])
camera = gateway.camera_server.camera(camera_id)
if camera is not None:
camera.subscribe(stream)
stream.read_until_close(on_close)
stream.read_bytes(struct.calcsize('!Q'), on_data)
class MobileServer(object):
def __init__(self):
self.__http_server = HTTPServer(WSGIContainer(flask))
self.__tcp_server = MobileTCPServer(self)
def listen(self,
http_port=MOBILE_NETWORK_HTTP_PORT,
tcp_port=MOBILE_NETWORK_TCP_PORT,
address=MOBILE_NETWORK_IP):
self.__http_server.listen(http_port, address=address)
logging.info('Listening mobile http server on {}:{}'.
format(address, http_port))
self.__tcp_server.listen(tcp_port, address=address)
logging.info('Listening mobile tcp server on {}:{}'.
format(address, tcp_port))
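# A minimal sketch of wiring the server into Tornado's event loop; it assumes
# gateway.camera_server has already been initialised elsewhere in the gateway.
def run_mobile_server():
    server = MobileServer()
    server.listen()                # defaults from gateway.conf
    IOLoop.current().start()       # blocks, serving HTTP and TCP clients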
|
mit
|
tmclaugh/lightblue-0.4
|
src/linux/_discoveryui.py
|
49
|
17786
|
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
try:
from Tkinter import *
except ImportError, e:
raise ImportError("Error loading GUIs for selectdevice() and selectservice(), Tkinter not found: " + str(e))
# Provides services for controlling a listbox, tracking selections, etc.
class ListboxController(object):
def __init__(self, listbox, cb_chosen):
"""
Arguments:
- cb_chosen: called when a listbox item is chosen -- i.e. when
an item is double-clicked or the <Return> key is pressed while
an item is selected.
"""
self.setlistbox(listbox)
self.cb_chosen = cb_chosen
self.__alarmIDs = {}
def setlistbox(self, listbox):
self.listbox = listbox
self.listbox.bind("<Double-Button-1>", lambda evt: self._chosen())
self.listbox.bind("<Return>", lambda evt: lambda evt: self._chosen())
# adds an item to the list
def add(self, *items):
for item in items:
self.listbox.insert(END, item)
# clears items in listbox & refreshes UI
def clear(self):
self.listbox.delete(0, END)
# selects an item in the list.
# pass index=None to deselect.
def select(self, index):
self._deselect()
if index is not None:
self.listbox.selection_set(index)
self.listbox.focus()
def _deselect(self):
selected = self.selectedindex()
if selected != -1:
self.listbox.selection_clear(selected)
def selectedindex(self):
sel = self.listbox.curselection()
if len(sel) > 0:
return int(sel[0])
return -1
# starts polling the listbox for a user selection and calls cb_selected
# when an item is selected.
def track(self, cb_selected, interval=100):
self._track(interval, -1, cb_selected)
def _track(self, interval, lastindex, callback):
index = self.selectedindex()
if index != -1 and index != lastindex:
callback(index)
# recursively keep tracking
self.__alarmIDs[id(self.listbox)] = self.listbox.after(
interval, self._track, interval, index, callback)
def stoptracking(self):
for x in self.__alarmIDs.values():
self.listbox.after_cancel(x)
def focus(self):
self.listbox.focus()
def update(self):
self.listbox.update()
# called when a selection has been chosen (i.e. pressed return / dbl-click)
def _chosen(self):
index = self.selectedindex()
if index != -1:
self.cb_chosen(index)
# A frame which contains a listbox and has a title above the listbox.
class StandardListboxFrame(Frame):
def __init__(self, parent, title, boxwidth=28):
Frame.__init__(self, parent)
self.pack()
self.buildUI(parent, title, boxwidth)
def buildUI(self, parent, title, boxwidth):
bigframe = Frame(parent)
bigframe.pack(side=LEFT, fill=BOTH, expand=1)
self.titlelabel = Label(bigframe, text=title)
self.titlelabel.pack(side=TOP)
mainframe = Frame(bigframe, bd=1, relief=SUNKEN)
mainframe.pack(side=BOTTOM, fill=BOTH, expand=1)
scrollbar = Scrollbar(mainframe)
scrollbar.pack(side=RIGHT, fill=Y)
self.listbox = Listbox(mainframe, bd=1, exportselection=0)
self.listbox.pack(fill=BOTH, expand=1)
self.listbox.config(background="white", width=boxwidth)
# attach listbox to scrollbar
self.listbox.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=self.listbox.yview)
def settitle(self, title):
self.titlelabel.config(text=title)
class StatusBar(object):
def __init__(self, parent, side=TOP, text=""):
self.label = Label(parent, text=text, bd=0, pady=8)
self.label.pack(side=side, fill=BOTH, expand=1)
def settext(self, text):
self.label.config(text=text)
# makes UI with top pane, status bar below top pane, and bottom pane.
# Probably should use a grid geometry manager instead, might be easier.
class LayoutFrame(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, padx=10, pady=5) # inner padding
self.topframe = Frame(self)
self.topframe.pack(side=TOP, fill=BOTH, expand=1)
self.statusbar = StatusBar(self)
self.lineframe = Frame(self, height=1, bg="#999999")
self.lineframe.pack(side=TOP, fill=BOTH, expand=1)
self.bottomframe = Frame(self, pady=5)
self.bottomframe.pack(side=BOTTOM, fill=BOTH, expand=1)
# Abstract class for controlling and tracking selections for a listbox.
class ItemSelectionController(object):
def __init__(self, listbox, cb_chosen):
self.cb_chosen = cb_chosen
self._controller = ListboxController(listbox, self._chosen)
self._closed = False
def getselection(self):
index = self._controller.selectedindex()
if index != -1:
return self._getitem(index)
return None
# set callback=None to switch off tracking
def trackselections(self, callback, interval=100):
if callback is not None:
self.cb_selected = callback
self._controller.track(self._selected, interval)
else:
self._controller.stoptracking()
def close(self):
self._controller.stoptracking()
self._closed = True
def closed(self):
return self._closed
# called when an item is chosen (e.g. dbl-clicked, not just selected)
def _chosen(self, index):
if self.cb_chosen:
self.cb_chosen(self._getitem(index))
def _selected(self, index):
if self.cb_selected:
self.cb_selected(self._getitem(index))
# move focus to this listbox
self._controller.focus()
def getitemcount(self):
raise NotImplementedError
def _getitem(self, index):
raise NotImplementedError
class DeviceSelectionController(ItemSelectionController):
# keep cache across instances (and across different sessions)
_cache = []
def __init__(self, listbox, cb_chosen):
super(DeviceSelectionController, self).__init__(listbox, cb_chosen)
self._discoverer = None
self.__items = []
self._loadcache()
def close(self):
self._stopdiscovery()
DeviceSelectionController._cache = self.__items[:]
super(DeviceSelectionController, self).close()
def refreshdevices(self):
self.__items = []
self._controller.clear()
self._controller.update()
self._stopdiscovery()
self._discoverer = _DeviceDiscoverer(self._founddevice, None)
self._discoverer.find_devices(duration=10)
#self._test("device", 0, 5)
def _additem(self, deviceinfo):
self.__items.append(deviceinfo)
self._controller.add(deviceinfo[1]) # add name
def getitemcount(self):
return len(self.__items)
def _getitem(self, index):
return self.__items[index]
def _founddevice(self, address, deviceclass, name):
self._additem((address, name, deviceclass))
# push updates to ensure names are progressively added to the display
self._controller.listbox.update()
def _loadcache(self):
for item in DeviceSelectionController._cache:
self._additem(item)
def _stopdiscovery(self):
if self._discoverer is not None:
self._discoverer.cancel_inquiry()
def _test(self, desc, n, max):
import threading
if n < max:
dummy = ("00:00:00:00:00:"+str(n), "Device-" + str(n), 0)
self._additem(dummy)
threading.Timer(1.0, self._test, [desc, n+1, max]).start()
class ServiceSelectionController(ItemSelectionController):
def __init__(self, listbox, cb_chosen):
super(ServiceSelectionController, self).__init__(listbox, cb_chosen)
self.__items = []
# keep cache for each session (i.e. each time window is opened)
self._sessioncache = {}
def _additem(self, service):
self.__items.append(service)
self._controller.add(self._getservicedesc(service))
def getitemcount(self):
return len(self.__items)
# show services for given device address
# pass address=None to clear display
def showservices(self, address):
self.__items = []
self._controller.clear()
if address is None: return
services = self._sessioncache.get(address)
if not services:
import lightblue
services = lightblue.findservices(address)
#services = [("", 1, "one"), ("", 2, "two"), ("", 3, "three")]
self._sessioncache[address] = services
if len(services) > 0:
for service in services:
self._additem(service)
def _getitem(self, index):
return self.__items[index]
def _getservicedesc(self, service):
address, port, name = service
return "(%s) %s" % (str(port), name)
class DeviceSelector(Frame):
title = "Select Bluetooth device"
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack()
self._buildUI()
self._selection = None
self._closed = False
self.master.bind("<Escape>", lambda evt: self._clickedcancel())
def _buildUI(self):
mainframe = LayoutFrame(self)
mainframe.pack()
self._statusbar = mainframe.statusbar
self._buildlistdisplay(mainframe.topframe)
self._buildbuttons(mainframe.bottomframe)
def _buildlistdisplay(self, parent):
self.devicesframe = StandardListboxFrame(parent, "Devices",
boxwidth=38)
self.devicesframe.pack(side=LEFT, fill=BOTH, expand=1)
self._devicemanager = DeviceSelectionController(
self.devicesframe.listbox, self._chosedevice)
def _buildbuttons(self, parent):
self._searchbutton = Button(parent, text="Search for devices",
command=self._clickedsearch)
self._searchbutton.pack(side=LEFT)
self._selectbutton = Button(parent, text="Select",
command=self._clickedselect)
self._selectbutton.pack(side=RIGHT)
self._selectbutton.config(state=DISABLED)
self._cancelbutton = Button(parent, text="Cancel",
command=self._clickedcancel)
self._cancelbutton.pack(side=RIGHT)
def run(self):
try:
self._trackselections(True)
# run gui event loop
self.mainloop()
except Exception, e:
print "Warning: error during device selection:", e
def _trackselections(self, track):
if track:
self._devicemanager.trackselections(self._selecteddevice)
else:
self._devicemanager.trackselections(None)
def getresult(self):
return self._selection
def _selecteddevice(self, device):
self._selectbutton.config(state=NORMAL)
def _chosedevice(self, device):
self._clickedselect()
def _clickedsearch(self):
self._statusbar.settext("Searching for nearby devices...")
self._searchbutton.config(state=DISABLED)
self._selectbutton.config(state=DISABLED)
self.update()
self._devicemanager.refreshdevices()
if not self._closed:
self._statusbar.settext(
"Found %d devices." % self._devicemanager.getitemcount())
self._searchbutton.config(state=NORMAL)
def _clickedcancel(self):
self._quit()
def _clickedselect(self):
self._selection = self._devicemanager.getselection()
self._quit()
def _quit(self):
self._closed = True
self._devicemanager.close()
#Frame.quit(self) # doesn't close the window
self.master.destroy()
class ServiceSelector(DeviceSelector):
title = "Select Bluetooth service"
def _buildlistdisplay(self, parent):
self.devicesframe = StandardListboxFrame(parent, "Devices")
self.devicesframe.pack(side=LEFT, fill=BOTH, expand=1)
self._devicemanager = DeviceSelectionController(
self.devicesframe.listbox, self._pickeddevice)
# hack some space in between the 2 lists
spacerframe = Frame(parent, width=10)
spacerframe.pack(side=LEFT, fill=BOTH, expand=1)
self.servicesframe = StandardListboxFrame(parent, "Services")
self.servicesframe.pack(side=LEFT, fill=BOTH, expand=1)
self._servicemanager = ServiceSelectionController(
self.servicesframe.listbox, self._choseservice)
def _trackselections(self, track):
if track:
self._devicemanager.trackselections(self._pickeddevice)
self._servicemanager.trackselections(self._selectedservice)
else:
self._devicemanager.trackselections(None)
self._servicemanager.trackselections(None)
def _clearservices(self):
self.servicesframe.settitle("Services")
self._servicemanager.showservices(None) # clear services list
# called when a device is selected, or chosen
def _pickeddevice(self, deviceinfo):
self._clearservices()
self._statusbar.settext("Finding services for %s..." % deviceinfo[1])
self._selectbutton.config(state=DISABLED)
self._searchbutton.config(state=DISABLED)
self.update()
self._servicemanager.showservices(deviceinfo[0])
if not self._closed: # user might have clicked 'cancel'
self.servicesframe.settitle("%s's services" % deviceinfo[1])
self._statusbar.settext("Found %d services for %s." % (
self._servicemanager.getitemcount(),
deviceinfo[1]))
self._searchbutton.config(state=NORMAL)
def _selectedservice(self, service):
self._selectbutton.config(state=NORMAL)
def _choseservice(self, service):
self._clickedselect()
def _clickedsearch(self):
self._clearservices()
self._trackselections(False) # don't track selections while searching
# do the search
DeviceSelector._clickedsearch(self)
# re-enable selection tracking
if not self._closed:
self._trackselections(True)
def _clickedselect(self):
self._selection = self._servicemanager.getselection()
self._quit()
def _quit(self):
self._closed = True
self._devicemanager.close()
self._servicemanager.close()
self.master.destroy()
# -----------------------------------
import select
import bluetooth
class _DeviceDiscoverer(bluetooth.DeviceDiscoverer):
def __init__(self, cb_found, cb_complete):
bluetooth.DeviceDiscoverer.__init__(self) # old-style superclass
self.cb_found = cb_found
self.cb_complete = cb_complete
def find_devices(self, lookup_names=True, duration=8, flush_cache=True):
bluetooth.DeviceDiscoverer.find_devices(self, lookup_names, duration, flush_cache)
# process until inquiry is complete
self._done = False
self._cancelled = False
while not self._done and not self._cancelled:
#print "Processed"
readfiles = [self,]
rfds = select.select(readfiles, [], [])[0]
if self in rfds:
self.process_event()
# cancel_inquiry() doesn't like getting stopped in the middle of
# process_event() maybe? so just use flag instead.
if self._cancelled:
bluetooth.DeviceDiscoverer.cancel_inquiry(self)
def cancel_inquiry(self):
self._cancelled = True
def device_discovered(self, address, deviceclass, name):
#print "device_discovered", address, deviceclass, name
if self.cb_found:
self.cb_found(address, deviceclass, name)
def inquiry_complete(self):
#print "inquiry_complete"
self._done = True
if self.cb_complete:
self.cb_complete()
# -----------------------------------
# Centres a tkinter window
def centrewindow(win):
win.update_idletasks()
xmax = win.winfo_screenwidth()
ymax = win.winfo_screenheight()
x0 = (xmax - win.winfo_reqwidth()) / 2
y0 = (ymax - win.winfo_reqheight()) / 2
win.geometry("+%d+%d" % (x0, y0))
def setupwin(rootwin, title):
# set window title
rootwin.title(title)
# place window at centre
rootwin.after_idle(centrewindow, rootwin)
rootwin.update()
# -----------------------------------
def selectdevice():
rootwin = Tk()
selector = DeviceSelector(rootwin)
setupwin(rootwin, DeviceSelector.title)
selector.run()
return selector.getresult()
def selectservice():
rootwin = Tk()
selector = ServiceSelector(rootwin)
setupwin(rootwin, ServiceSelector.title)
selector.run()
return selector.getresult()
if __name__ == "__main__":
print selectservice()
|
gpl-3.0
|
kingmotley/SickRage
|
lib/github/Legacy.py
|
72
|
7248
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import urlparse
import github.PaginatedList
class PaginatedList(github.PaginatedList.PaginatedListBase):
def __init__(self, url, args, requester, key, convert, contentClass):
github.PaginatedList.PaginatedListBase.__init__(self)
self.__url = url
self.__args = args
self.__requester = requester
self.__key = key
self.__convert = convert
self.__contentClass = contentClass
self.__nextPage = 0
self.__continue = True
def _couldGrow(self):
return self.__continue
def _fetchNextPage(self):
page = self.__nextPage
self.__nextPage += 1
return self.get_page(page)
def get_page(self, page):
assert isinstance(page, (int, long)), page
args = dict(self.__args)
if page != 0:
args["start_page"] = page + 1
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__url,
parameters=args
)
self.__continue = len(data[self.__key]) > 0
return [
self.__contentClass(self.__requester, headers, self.__convert(element), completed=False)
for element in data[self.__key]
]
def convertUser(attributes):
convertedAttributes = {
"login": attributes["login"],
"url": "/users/" + attributes["login"],
}
if "gravatar_id" in attributes: # pragma no branch
convertedAttributes["gravatar_id"] = attributes["gravatar_id"]
if "followers" in attributes: # pragma no branch
convertedAttributes["followers"] = attributes["followers"]
if "repos" in attributes: # pragma no branch
convertedAttributes["public_repos"] = attributes["repos"]
if "name" in attributes: # pragma no branch
convertedAttributes["name"] = attributes["name"]
if "created_at" in attributes: # pragma no branch
convertedAttributes["created_at"] = attributes["created_at"]
if "location" in attributes: # pragma no branch
convertedAttributes["location"] = attributes["location"]
return convertedAttributes
def convertRepo(attributes):
convertedAttributes = {
"owner": {"login": attributes["owner"], "url": "/users/" + attributes["owner"]},
"url": "/repos/" + attributes["owner"] + "/" + attributes["name"],
}
if "pushed_at" in attributes: # pragma no branch
convertedAttributes["pushed_at"] = attributes["pushed_at"]
if "homepage" in attributes: # pragma no branch
convertedAttributes["homepage"] = attributes["homepage"]
if "created_at" in attributes: # pragma no branch
convertedAttributes["created_at"] = attributes["created_at"]
if "watchers" in attributes: # pragma no branch
convertedAttributes["watchers"] = attributes["watchers"]
if "has_downloads" in attributes: # pragma no branch
convertedAttributes["has_downloads"] = attributes["has_downloads"]
if "fork" in attributes: # pragma no branch
convertedAttributes["fork"] = attributes["fork"]
if "has_issues" in attributes: # pragma no branch
convertedAttributes["has_issues"] = attributes["has_issues"]
if "has_wiki" in attributes: # pragma no branch
convertedAttributes["has_wiki"] = attributes["has_wiki"]
if "forks" in attributes: # pragma no branch
convertedAttributes["forks"] = attributes["forks"]
if "size" in attributes: # pragma no branch
convertedAttributes["size"] = attributes["size"]
if "private" in attributes: # pragma no branch
convertedAttributes["private"] = attributes["private"]
if "open_issues" in attributes: # pragma no branch
convertedAttributes["open_issues"] = attributes["open_issues"]
if "description" in attributes: # pragma no branch
convertedAttributes["description"] = attributes["description"]
if "language" in attributes: # pragma no branch
convertedAttributes["language"] = attributes["language"]
if "name" in attributes: # pragma no branch
convertedAttributes["name"] = attributes["name"]
return convertedAttributes
def convertIssue(attributes):
convertedAttributes = {
"number": attributes["number"],
"url": "/repos" + urlparse.urlparse(attributes["html_url"]).path,
"user": {"login": attributes["user"], "url": "/users/" + attributes["user"]},
}
if "labels" in attributes: # pragma no branch
convertedAttributes["labels"] = [{"name": label} for label in attributes["labels"]]
if "title" in attributes: # pragma no branch
convertedAttributes["title"] = attributes["title"]
if "created_at" in attributes: # pragma no branch
convertedAttributes["created_at"] = attributes["created_at"]
if "comments" in attributes: # pragma no branch
convertedAttributes["comments"] = attributes["comments"]
if "body" in attributes: # pragma no branch
convertedAttributes["body"] = attributes["body"]
if "updated_at" in attributes: # pragma no branch
convertedAttributes["updated_at"] = attributes["updated_at"]
if "state" in attributes: # pragma no branch
convertedAttributes["state"] = attributes["state"]
return convertedAttributes
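# A minimal illustration of the legacy-attribute conversion above; the input
# dict is a hypothetical legacy search result.
def _example_convert_user():
    legacy = {"login": "octocat", "name": "The Octocat", "followers": 10}
    return convertUser(legacy)
    # -> {"login": "octocat", "url": "/users/octocat",
    #     "name": "The Octocat", "followers": 10}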
|
gpl-3.0
|
manashmndl/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset in chunks. The way we proceed is to load one image at a time
and extract 50 random patches from it. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans lets us see that some
clusters are reassigned during the successive calls to
`partial_fit`. This is because the number of patches they represent
has become too low, and it is better to choose a random new cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
jrslocum17/pynet_test
|
Week7/Show_int_eapi.py
|
1
|
1178
|
#!/usr/bin/env python
"""
Ex 1. Use Arista's eAPI to obtain 'show interfaces' from the switch. Parse the 'show interfaces' output to obtain the
'inOctets' and 'outOctets' fields for each of the interfaces on the switch. Accomplish this using Arista's pyeapi.
"""
import pyeapi
from pprint import pprint
def main():
pynet_sw4 = pyeapi.connect_to("pynet-sw4")
command_result = pynet_sw4.enable('show interfaces')
show_int_output = command_result[0]['result']
interface_dict = show_int_output['interfaces']
#pprint(interface_dict)
sorted_ints = sorted(interface_dict.keys())
print("")
print("{:16}{:16}{:16}".format("Interface", "InOctets", "OutOctets"))
print("{:-<16}{:-<16}{:-<16}".format("", "", ""))
for int in sorted_ints:
# Use .get() instead of a direct reference to have option of specifying a default if key not found
in_octets = interface_dict[int].get('interfaceCounters', {}).get('inOctets', "")
out_octets = interface_dict[int].get('interfaceCounters', {}).get('outOctets', "")
print("{:16}{:<16}{:<16}".format(int, in_octets, out_octets))
print("")
if __name__ == "__main__":
main()
|
apache-2.0
|
tigerneil/Theano-Lights
|
train.py
|
5
|
8043
|
import numpy as np
import time
from operator import add
from toolbox import *
from models import *
if __name__ == "__main__":
# Hyper-parameters
#--------------------------------------------------------------------------------------------------
hp = Parameters()
with hp:
batch_size = 1000
test_batch_size = 1000
train_perm = True
load_model = False
save_model = True
debug = False
# ------------------
walkforward = False
walkstep_size = 5
ws_iterations = 200
n_stepdecay = 1.0
ws_validstop = 0.02
# ------------------
#Model = ffn.FFN
#Model = ffn_bn.FFN_bn
#Model = ffn_ace.FFN_ace
#Model = ffn_lae.FFN_lae
#Model = ffn_vat.FFN_vat
Model = ffn_bn_vat.FFN_bn_vat
#Model = cnn.CNN
#Model = vae1.Vae1
#Model = cvae.Cvae
#Model = draw_at_lstm1.Draw_at_lstm1
#Model = draw_at_lstm2.Draw_at_lstm2
#Model = draw_lstm1.Draw_lstm1
#Model = draw_sgru1.Draw_sgru1
init_scale = 1.05
learning_rate = 0.003
lr_halflife = 9.5
optimizer = adamgc
description = ''
# Data
#--------------------------------------------------------------------------------------------------
data_path = 'data/'
data = mnist(path=data_path+'mnist/', nvalidation=0)
#data = mnist2(path=data_path+'mnist/')
#data = mnistBinarized(path=data_path+'mnist/') # only for UL models
#data = mnist(path=data_path+'mnist/', distort=3, shuffle=True)
#data = freyfaces(path=data_path+'frey/')
#data = downsample(data)
#visualize(-1, data['tr_X'][0:min(len(data['tr_X']), 900)], data['shape_x'])
# Training
#--------------------------------------------------------------------------------------------------
model = Model(data, hp)
print ("M: %s lr: %.5f init: %.2f batch: %d ws: %d iter: %d desc: %s" % (model.id, learning_rate, init_scale, batch_size, walkforward*walkstep_size, ws_iterations, description))
if walkforward:
# Walkforward learning
n_ws = len(data['tr_X']) / walkstep_size / batch_size
it_lr = learning_rate
for walkstep in xrange(0, n_ws):
begin = time.time()
min_validation = 100000.
#tr_outputs = model.train_walkstep(walkstep, ws_iterations, it_lr)
# Validate on previous data
for it in range(ws_iterations):
begin_inner = time.time()
tr_outputs = None
for i in xrange(0, walkstep_size):
batch_idx = walkstep * walkstep_size + i
outputs = model.train(batch_idx, it_lr)
outputs = map(lambda x: x / float(walkstep_size * batch_size), outputs)
if i==0:
tr_outputs = outputs
else:
tr_outputs = map(add, tr_outputs, outputs)
prev_va_outputs = [0.] * 100
for i in xrange(0, walkstep * walkstep_size):
outputs = model.validate(i)
outputs = map(lambda x: x / (walkstep * walkstep_size * batch_size), outputs)
prev_va_outputs = (map(add, prev_va_outputs, outputs) if i!=0 else outputs)
print(" > %d,\t%.2f,\t%.2f,\t%.2f,\t%.2f,\t%.3f,\t%.2f" % (it,
tr_outputs[model.outidx['cost_q']], prev_va_outputs[model.outidx['cost_q']],
tr_outputs[model.outidx['cost']], prev_va_outputs[model.outidx['cost']],
tr_outputs[model.outidx['norm_grad']],
time.time() - begin_inner))
# Early stopping on previous data
if prev_va_outputs[model.outidx['cost']] < min_validation:
min_validation = prev_va_outputs[model.outidx['cost']]
elif prev_va_outputs[model.outidx['cost']] > min_validation * (1. + ws_validstop):
break
te_outputs = model.test_epoch()
if model.type == 'SL':
# Supervised learning
print("%d,%.4f,%.4f,%.4f,%.4f,%.4f,%.2f" % (walkstep,
tr_outputs[model.outidx['cost']], te_outputs[model.outidx['cost']],
tr_outputs[model.outidx['error_map_pyx']], te_outputs[model.outidx['error_map_pyx']],
tr_outputs[model.outidx['norm_grad']],
time.time() - begin))
else:
# Unsupervised learning
print("%d,\t%.2f,\t%.2f,\t%.2f,\t%.2f,\t%.3f,\t%.2f" % (walkstep,
tr_outputs[model.outidx['cost_q']], te_outputs[model.outidx['cost_q']],
tr_outputs[model.outidx['cost']], te_outputs[model.outidx['cost']],
tr_outputs[model.outidx['norm_grad']],
time.time() - begin))
# Generate samples
y_samples = model.decode(36 * (model.n_t + 1))
y_samples = np.transpose(y_samples, (1,0,2)).reshape((-1, y_samples.shape[2]))
visualize(walkstep + 1, y_samples, data['shape_x'])
#it_lr = float(learning_rate / (walkstep + 1.))
#it_lr = it_lr*n_stepdecay
ws_iterations = int(ws_iterations*n_stepdecay)
model.save()
else:
# Full training data learning
n_iterations = 10000
freq_save = 20
freq_sample = 10
it_lr = learning_rate
for it in range(n_iterations):
begin = time.time()
model.permuteData(data)
tr_outputs = model.train_epoch(it_lr)
if len(data['va_X']) > 0:
te_outputs = model.validation_epoch()
else:
te_outputs = model.test_epoch()
if model.type == 'SL':
# Supervised learning
print("%d,%.4f,%.4f,%.4f,%.4f,%.2e,%.2f" % (it,
tr_outputs[model.outidx['cost']], te_outputs[model.outidx['cost']],
tr_outputs[model.outidx['error_map_pyx']], te_outputs[model.outidx['error_map_pyx']],
tr_outputs[model.outidx['norm_grad']],
time.time() - begin))
elif model.type == 'UL':
# Unsupervised learning
print("%d,%.2f,%.2f,%.2f,%.2f,%.2e,%.2f" % (it,
tr_outputs[model.outidx['cost_q']], te_outputs[model.outidx['cost_q']],
tr_outputs[model.outidx['cost']], te_outputs[model.outidx['cost']],
tr_outputs[model.outidx['norm_grad']],
time.time() - begin))
# Generate samples
if it % freq_sample == 0:
y_samples = model.decode(36 * (model.n_t + 1))
y_samples = np.transpose(y_samples, (1,0,2)).reshape((-1, y_samples.shape[2]))
visualize(it, y_samples, data['shape_x'])
# Save model parameters
if hp.save_model and it % freq_save == 0:
model.save()
it_lr = float(it_lr*np.power(0.5, 1./lr_halflife))
|
mit
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/django/contrib/sessions/backends/signed_cookies.py
|
37
|
2889
|
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core import signing
class SessionStore(SessionBase):
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
try:
return signing.loads(
self.session_key,
serializer=self.serializer,
# This doesn't handle non-default expiry dates, see #19201
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies',
)
except Exception:
# BadSignature, ValueError, or unpickling exceptions. If any of
# these happen, reset the session.
self.create()
return {}
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
return signing.dumps(
self._session, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=self.serializer,
)
@classmethod
def clear_expired(cls):
pass
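# A minimal sketch (assuming Django settings with a SECRET_KEY are configured)
# of the signing round trip that _get_session_key() and load() rely on; the
# payload is a hypothetical session dict.
def _example_signing_roundtrip():
    payload = {'user_id': 42}
    token = signing.dumps(payload, compress=True,
                          salt='django.contrib.sessions.backends.signed_cookies')
    return signing.loads(token,
                         salt='django.contrib.sessions.backends.signed_cookies')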
|
gpl-3.0
|
waytai/networkx
|
networkx/algorithms/centrality/tests/test_dispersion.py
|
63
|
1416
|
import networkx as nx
from nose.tools import *
def small_ego_G():
"""The sample network from http://arxiv.org/pdf/1310.6753v1.pdf"""
edges=[('a','b'), ('a','c'), ('b','c'), ('b','d'),
('b', 'e'),('b','f'),('c','d'),('c','f'),('c','h'),('d','f'), ('e','f'),
('f','h'),('h','j'), ('h','k'),('i','j'), ('i','k'), ('j','k'), ('u','a'),
('u','b'), ('u','c'), ('u','d'), ('u','e'), ('u','f'), ('u','g'), ('u','h'),
('u','i'), ('u','j'), ('u','k')]
G = nx.Graph()
G.add_edges_from(edges)
return G
class TestDispersion(object):
def test_article(self):
"""our algorithm matches article's"""
G = small_ego_G()
disp_uh = nx.dispersion(G, 'u', 'h', normalized=False)
disp_ub = nx.dispersion(G, 'u', 'b', normalized=False)
assert disp_uh == 4
assert disp_ub == 1
def test_results_length(self):
"""there is a result for every node"""
G = small_ego_G()
disp = nx.dispersion(G)
disp_Gu = nx.dispersion(G, 'u')
disp_uv = nx.dispersion(G, 'u', 'h')
assert len(disp) == len(G)
assert len(disp_Gu) == len(G) - 1
assert type(disp_uv) is float
def test_impossible_things(self):
G=nx.karate_club_graph()
disp = nx.dispersion(G)
for u in disp:
for v in disp[u]:
assert disp[u][v] >= 0
|
bsd-3-clause
|
mickael-guene/gcc
|
gcc/ada/doc/share/ada_pygments.py
|
27
|
7455
|
"""Alternate Ada and Project Files parsers for Sphinx/Rest"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
def get_lexer_tokens(tag_highlighting=False, project_support=False):
"""Return the tokens needed for RegexLexer
:param tag_highlighting: if True, support tag highlighting. See the
TaggedAdaLexer documentation
:type tag_highlighting: bool
:param project_support: if True, support additional keywords associated
with project files.
:type project_support: bool
:return: a dictionary following the structure required by RegexLexer
:rtype: dict
"""
if project_support:
project_pattern = r'project\s+|'
project_pattern2 = r'project|'
else:
project_pattern = r''
project_pattern2 = r''
result = {
'root': [
# Comments
(r'--.*$', Comment),
# Character literal
(r"'.'", String.Char),
# Strings
(r'"[^"]*"', String),
# Numeric
# Based literal
(r'[0-9][0-9_]*#[0-9a-f][0-9a-f_]*#(E[\+-]?[0-9][0-9_]*)?',
Number.Integer),
(r'[0-9][0-9_]*#[0-9a-f][0-9a-f_]*'
r'\.[0-9a-f][0-9a-f_]*#(E[\+-]?[0-9][0-9_]*)?', Number.Float),
# Decimal literal
(r'[0-9][0-9_]*\.[0-9][0-9_](E[\+-]?[0-9][0-9_]*)?', Number.Float),
(r'[0-9][0-9_]*(E[\+-]?[0-9][0-9_]*)?', Number.Integer),
# Match use and with statements
# The first part of the pattern is to be sure we don't match
# for/use constructs.
(r'(\n\s*|;\s*)(with|use)(\s+[\w\.]+)',
bygroups(Punctuation, Keyword.Reserved, Name.Namespace)),
# Match procedure, package and function declarations
(r'end\s+(if|loop|record)', Keyword),
(r'(package(?:\s+body)?\s+|' + project_pattern +
r'function\s+|end\s+|procedure\s+)([\w\.]+)',
bygroups(Keyword, Name.Function)),
# Ada 2012 standard attributes, GNAT specific ones and
# Spark 2014 ones ('Update and 'Loop_Entry)
# (reversed order to avoid having for
# example Max before Max_Alignment_For_Allocation).
(r'\'(Write|Width|Wide_Width|Wide_Wide_Width|Wide_Wide_Value|'
r'Wide_Wide_Image|Wide_Value|Wide_Image|Word_Size|Wchar_T_Size|'
r'Version|Value_Size|Value|Valid_Scalars|VADS_Size|Valid|Val|'
r'Update|Unrestricted_Access|Universal_Literal_String|'
r'Unconstrained_Array|Unchecked_Access|Unbiased_Rounding|'
r'Truncation|Type_Class|To_Address|Tick|Terminated|'
r'Target_Name|Tag|System_Allocator_Alignment|Succ|Stub_Type|'
r'Stream_Size|Storage_Unit|Storage_Size|Storage_Pool|Small|Size|'
r'Simple_Storage_Pool|Signed_Zeros|Scaling|Scale|'
r'Scalar_Storage_Order|Safe_Last|Safe_Large|Safe_First|'
r'Safe_Emax|Rounding|Round|Result|Remainder|Ref|Read|'
r'Range_Length|Range|Priority|Pred|'
r'Position|Pos|Pool_Address|Passed_By_Reference|Partition_Id|'
r'Overlaps_Storage|Output|Old|Object_Size|Null_Parameter|Modulus|'
r'Model_Small|Model_Mantissa|Model_Epsilon|Model_Emin|Model|Mod|'
r'Min|Mechanism_Code|Maximum_Alignment|'
r'Max_Size_In_Storage_Elements|Max_Priority|'
r'Max_Interrupt_Priority|Max_Alignment_For_Allocation|'
r'Max|Mantissa|Machine_Size|Machine_Rounds|Machine_Rounding|'
r'Machine_Radix|Machine_Overflows|Machine_Mantissa|Machine_Emin|'
r'Machine_Emax|Machine|Loop_Entry|Length|Length|Leading_Part|'
r'Last_Valid|Last_Bit|Last|Large|Invalid_Value|Integer_Value|'
r'Input|Image|Img|Identity|Has_Same_Storage|Has_Discriminants|'
r'Has_Access_Values|Fraction|Fore|Floor|Fixed_Value|First_Valid|'
r'First_Bit|First|External_Tag|Exponent|Epsilon|Enum_Val|'
r'Enum_Rep|Enabled|Emax|Elaborated|Elab_Subp_Body|Elab_Spec|'
r'Elab_Body|Descriptor_Size|Digits|Denorm|Delta|Definite|'
r'Default_Bit_Order|Count|Copy_Sign|Constrained|'
r'Compose|Component_Size|Compiler_Version|Code_Address|Class|'
r'Ceiling|Caller|Callable|Body_Version|Bit_Order|Bit_Position|'
r'Bit|Base|Asm_Output|Asm_Input|Alignment|Aft|Adjacent|'
r'Address_Size|Address|Access|Abort_Signal|AST_Entry)',
Name.Attribute),
# All Ada2012 reserved words
(r'(abort|abstract|abs|accept|access|aliased|all|and|array|at|'
r'begin|body|case|constant|declare|delay|delta|digits|do|'
r'else|elsif|end|entry|exception|exit|for|function|generic|goto|'
r'if|interface|in|is|limited|loop|mod|new|not|null|'
r'of|or|others|out|overriding|' + project_pattern2 +
r'package|pragma|private|procedure|protected|'
r'raise|range|record|rem|renames|requeue|return|reverse|'
r'select|separate|some|subtype|synchronized|'
r'tagged|task|terminate|then|type|until|use|when|while|with|xor'
r')([\s;,])',
bygroups(Keyword.Reserved, Punctuation)),
# Two characters operators
(r'=>|\.\.|\*\*|:=|/=|>=|<=|<<|>>|<>', Operator),
# One character operators
(r'&|\'|\(|\)|\*|\+|-|\.|/|:|<|=|>|\|', Operator),
(r',|;', Punctuation),
# Spaces
(r'\s+', Text),
# Builtin values
(r'False|True', Keyword.Constant),
# Identifiers
(r'[\w\.]+', Name)], }
# Insert tag highlighting before identifiers
if tag_highlighting:
result['root'].insert(-1, (r'\[[\w ]*\]', Name.Tag))
return result
class AdaLexer(RegexLexer):
"""Alternate Pygments lexer for Ada source code and project files
The default Pygments lexer always fails, which disables syntax
highlighting in Sphinx. This lexer is simpler but safer.
In order to use this lexer in your Sphinx project add the following
code at the end of your conf.py
.. code-block:: python
import gnatpython.ada_pygments
def setup(app):
app.add_lexer('ada', gnatpython.ada_pygments.AdaLexer())
"""
name = 'Ada'
aliases = ['ada', 'ada83', 'ada95', 'ada2005', 'ada2012']
filenames = ['*.adb', '*.ads', '*.ada']
mimetypes = ['text/x-ada']
flags = re.MULTILINE | re.I # Ignore case
tokens = get_lexer_tokens()
class TaggedAdaLexer(AdaLexer):
"""Alternate Pygments lexer for Ada source code with tags
A tag is a string of the form::
[MY STRING]
Only alphanumerical characters and spaces are considered inside the
brackets.
"""
name = 'TaggedAda'
aliases = ['tagged_ada']
tokens = get_lexer_tokens(True)
class GNATProjectLexer(RegexLexer):
"""Pygment lexer for project files
This is the same as the AdaLexer but with support of ``project``
keyword.
"""
name = 'GPR'
aliases = ['gpr']
filenames = ['*.gpr']
mimetypes = ['text/x-gpr']
flags = re.MULTILINE | re.I # Ignore case
tokens = get_lexer_tokens(project_support=True)
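# A minimal, hedged usage sketch (not part of the original module): it assumes
# pygments is installed, and the Ada snippet and formatter choice below are
# purely illustrative.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample = 'procedure Hello is\nbegin\n   null;\nend Hello;\n'
    # Highlight a plain Ada snippet with the lexer defined above; TaggedAdaLexer
    # would additionally mark bracketed tags such as [MY STRING] as Name.Tag.
    print(highlight(sample, AdaLexer(), TerminalFormatter()))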
|
gpl-2.0
|
artiya4u/thefuck
|
thefuck/rules/fix_file.py
|
6
|
2476
|
import re
import os
from thefuck.utils import memoize, wrap_settings
from thefuck import shells
# order is important: only the first match is considered
patterns = (
# js, node:
'^ at {file}:{line}:{col}',
# cargo:
'^ {file}:{line}:{col}',
# python, thefuck:
'^ File "{file}", line {line}',
# awk:
'^awk: {file}:{line}:',
# git
'^fatal: bad config file line {line} in {file}',
# llc:
'^llc: {file}:{line}:{col}:',
# lua:
'^lua: {file}:{line}:',
# fish:
'^{file} \\(line {line}\\):',
# bash, sh, ssh:
'^{file}: line {line}: ',
# cargo, clang, gcc, go, pep8, rustc:
'^{file}:{line}:{col}',
# ghc, make, ruby, zsh:
'^{file}:{line}:',
# perl:
'at {file} line {line}',
)
# for the sake of readability do not use named groups above
def _make_pattern(pattern):
pattern = pattern.replace('{file}', '(?P<file>[^:\n]+)')
pattern = pattern.replace('{line}', '(?P<line>[0-9]+)')
pattern = pattern.replace('{col}', '(?P<col>[0-9]+)')
return re.compile(pattern, re.MULTILINE)
patterns = [_make_pattern(p) for p in patterns]
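# For illustration only (not part of the original rule): after expansion the
# python/thefuck template '^  File "{file}", line {line}' becomes
#     re.compile('^  File "(?P<file>[^:\n]+)", line (?P<line>[0-9]+)', re.MULTILINE)
# so a traceback line such as '  File "foo.py", line 10' yields
# m.group('file') == 'foo.py' and m.group('line') == '10'.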
@memoize
def _search(stderr):
for pattern in patterns:
m = re.search(pattern, stderr)
if m and os.path.isfile(m.group('file')):
return m
def match(command, settings):
if 'EDITOR' not in os.environ:
return False
return _search(command.stderr) or _search(command.stdout)
@wrap_settings({'fixlinecmd': '{editor} {file} +{line}',
'fixcolcmd': None})
def get_new_command(command, settings):
m = _search(command.stderr) or _search(command.stdout)
# Note: there does not seem to be a standard for columns, so they are just
# ignored by default
if settings.fixcolcmd and 'col' in m.groupdict():
editor_call = settings.fixcolcmd.format(editor=os.environ['EDITOR'],
file=m.group('file'),
line=m.group('line'),
col=m.group('col'))
else:
editor_call = settings.fixlinecmd.format(editor=os.environ['EDITOR'],
file=m.group('file'),
line=m.group('line'))
return shells.and_(editor_call, command.script)
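# A minimal, hedged self-check sketch (not part of the original rule): it
# assumes thefuck is importable and relies on this very file existing on disk,
# since _search only accepts matches whose file path points to a real file.
if __name__ == '__main__':
    sample_stderr = '  File "{0}", line 1\n'.format(__file__)
    m = _search(sample_stderr)
    if m:
        print('matched file={0} line={1}'.format(m.group('file'), m.group('line')))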
|
mit
|
OsirisSPS/osiris-sps
|
client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-mac/lib-scriptpackages/Explorer/URL_Suite.py
|
82
|
1268
|
"""Suite URL Suite: Standard suite for Uniform Resource Locators
Level 1, version 1
Generated from /Applications/Internet Explorer.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'GURL'
class URL_Suite_Events:
_argmap_GetURL = {
'to' : 'dest',
}
def GetURL(self, _object, _attributes={}, **_arguments):
"""GetURL: Open the URL (and optionally save it to disk)
Required argument: URL to open
Keyword argument to: File into which to save resource located at URL.
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'GURL'
_subcode = 'GURL'
aetools.keysubst(_arguments, self._argmap_GetURL)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
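# Hedged usage sketch (illustrative only; assumes the generated Explorer
# package on classic Mac OS, whose application class mixes in
# URL_Suite_Events):
#
#   import Explorer
#   ie = Explorer.Explorer(start=1)
#   ie.GetURL('http://www.example.com')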
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
gpl-3.0
|
Noviat/odoo
|
addons/knowledge/__openerp__.py
|
261
|
1738
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Knowledge Management System',
'version' : '1.0',
'depends' : ['base','base_setup'],
'author' : 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
Installer for knowledge-based Hidden.
=====================================
Makes the Knowledge Application Configuration available from where you can install
document and Wiki based Hidden.
""",
'website': 'https://www.odoo.com',
'data': [
'security/knowledge_security.xml',
'security/ir.model.access.csv',
'knowledge_view.xml',
'res_config_view.xml',
],
'demo': ['knowledge_demo.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
duducosmos/pgs4a
|
python-install/lib/python2.7/bsddb/db.py
|
194
|
2730
|
#----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
# This module is just a placeholder for possible future expansion, in
# case we ever want to augment the stuff in _db in any way. For now
# it just simply imports everything from _db.
import sys
absolute_import = (sys.version_info[0] >= 3)
if not absolute_import :
if __name__.startswith('bsddb3.') :
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
from _pybsddb import *
from _pybsddb import __version__
else:
from _bsddb import *
from _bsddb import __version__
else :
# Because this syntax is not valid before Python 2.5
if __name__.startswith('bsddb3.') :
exec("from ._pybsddb import *")
exec("from ._pybsddb import __version__")
else :
exec("from ._bsddb import *")
exec("from ._bsddb import __version__")
|
lgpl-2.1
|