code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
---|---|---|---|---|---|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
COMMISSIONER = 1
LEADER1 = 2
ROUTER1 = 3
LEADER2 = 4
class Cert_9_2_14_PanIdQuery(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [LEADER1]
},
LEADER1: {
'mode': 'rsdn',
'panid': 0xface,
'whitelist': [COMMISSIONER, ROUTER1]
},
ROUTER1: {
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [LEADER1, LEADER2]
},
LEADER2: {
'mode': 'rsdn',
'panid': 0xdead,
'whitelist': [ROUTER1]
},
}
def test(self):
self.nodes[LEADER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[LEADER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')
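# Pick one of ROUTER1's non-link-local addresses (the first one that does not
# start with fe80) to use as the unicast target of the PAN ID query below.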
ipaddrs = self.nodes[ROUTER1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.nodes[COMMISSIONER].panid_query(0xdead, 0xffffffff, ipaddr)
self.nodes[COMMISSIONER].panid_query(0xdead, 0xffffffff,
'ff33:0040:fd00:db8:0:0:0:1')
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| lanyuwen/openthread | tests/scripts/thread-cert/Cert_9_2_14_PanIdQuery.py | Python | bsd-3-clause | 3,516 |
import datetime
from corehq.apps.accounting import generator
from corehq.apps.accounting.invoicing import DomainInvoiceFactory
from corehq.apps.accounting.models import DefaultProductPlan, BillingAccount, Subscription, SubscriptionAdjustment
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.utils import get_previous_month_date_range
class TestDomainInvoiceFactory(BaseAccountingTest):
def setUp(self):
super(TestDomainInvoiceFactory, self).setUp()
self.invoice_start, self.invoice_end = get_previous_month_date_range()
self.domain = generator.arbitrary_domain()
self.account = BillingAccount.get_or_create_account_by_domain(
domain=self.domain.name, created_by="TEST"
)[0]
self.community = DefaultProductPlan.get_default_plan_by_domain(
self.domain).plan.get_version()
generator.arbitrary_commcare_users_for_domain(
self.domain.name, self.community.user_limit + 1
)
self.invoice_factory = DomainInvoiceFactory(
self.invoice_start, self.invoice_end, self.domain
)
def _clean_subs(self):
SubscriptionAdjustment.objects.all().delete()
Subscription.objects.all().delete()
def test_feature_charges(self):
domain_under_limits = generator.arbitrary_domain()
self.assertTrue(self.community.feature_charges_exist_for_domain(self.domain))
self.assertFalse(self.community.feature_charges_exist_for_domain(domain_under_limits))
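# The tests below exercise get_community_ranges(), which is expected to return
# the date ranges inside the invoicing period that no subscription covers (and
# which therefore fall back to the default community plan).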
def test_incomplete_starting_coverage(self):
some_plan = generator.arbitrary_subscribable_plan()
subscription = Subscription.new_domain_subscription(
self.account, self.domain, some_plan,
date_start=self.invoice_start + datetime.timedelta(days=3)
)
subscriptions = self.invoice_factory.get_subscriptions()
community_ranges = self.invoice_factory.get_community_ranges(subscriptions)
self.assertEqual(len(community_ranges), 1)
self.assertEqual(community_ranges[0][0], self.invoice_start)
self.assertEqual(community_ranges[0][1], subscription.date_start)
self._clean_subs()
def test_incomplete_ending_coverage(self):
some_plan = generator.arbitrary_subscribable_plan()
subscription = Subscription.new_domain_subscription(
self.account, self.domain, some_plan,
date_start=self.invoice_start,
date_end=self.invoice_end - datetime.timedelta(days=3)
)
subscriptions = self.invoice_factory.get_subscriptions()
community_ranges = self.invoice_factory.get_community_ranges(subscriptions)
self.assertEqual(len(community_ranges), 1)
self.assertEqual(community_ranges[0][0], subscription.date_end)
self.assertEqual(community_ranges[0][1],
self.invoice_end + datetime.timedelta(days=1))
self._clean_subs()
def test_patchy_coverage(self):
some_plan = generator.arbitrary_subscribable_plan()
middle_date = self.invoice_end - datetime.timedelta(days=15)
Subscription.new_domain_subscription(
self.account, self.domain, some_plan,
date_start=self.invoice_start + datetime.timedelta(days=1),
date_end=middle_date
)
next_start = middle_date + datetime.timedelta(days=2)
next_end = next_start + datetime.timedelta(days=2)
Subscription.new_domain_subscription(
self.account, self.domain, some_plan,
date_start=next_start,
date_end=next_end,
)
final_start = next_end + datetime.timedelta(days=2)
Subscription.new_domain_subscription(
self.account, self.domain, some_plan,
date_start=final_start,
date_end=self.invoice_end - datetime.timedelta(days=1),
)
subscriptions = self.invoice_factory.get_subscriptions()
self.assertEqual(len(subscriptions), 3)
community_ranges = self.invoice_factory.get_community_ranges(subscriptions)
self.assertEqual(len(community_ranges), 4)
self._clean_subs()
def test_full_coverage(self):
some_plan = generator.arbitrary_subscribable_plan()
Subscription.new_domain_subscription(
self.account, self.domain, some_plan,
date_start=self.invoice_start,
date_end=self.invoice_end + datetime.timedelta(days=1),
)
subscriptions = self.invoice_factory.get_subscriptions()
community_ranges = self.invoice_factory.get_community_ranges(subscriptions)
self.assertEqual(len(community_ranges), 0)
self._clean_subs()
def test_no_coverage(self):
subscriptions = self.invoice_factory.get_subscriptions()
self.assertEqual(len(subscriptions), 0)
community_ranges = self.invoice_factory.get_community_ranges(subscriptions)
self.assertEqual(len(community_ranges), 1)
| SEL-Columbia/commcare-hq | corehq/apps/accounting/tests/test_invoice_factory.py | Python | bsd-3-clause | 5,026 |
from gelato.models.addons import AddonBase, Category, AddonCategoryBase, AddonUser
| washort/gelato.admin | addons/models.py | Python | bsd-3-clause | 83 |
# -*- coding: utf-8 -*-
"""
envoy.core
~~~~~~~~~~
This module provides envoy awesomeness.
Copyright 2012, Kenneth Reitz.
MIT Licensed.
"""
import os
import shlex
import subprocess
import threading
__version__ = '0.0.2'
__license__ = 'MIT'
__author__ = 'Kenneth Reitz'
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.out = None
self.err = None
self.returncode = None
self.data = None
def run(self, data, timeout, env):
self.data = data
# Merge any extra environment variables into a copy of the current environment.
environ = dict(os.environ)
environ.update(env or {})
def target():
self.process = subprocess.Popen(self.cmd,
universal_newlines=True,
shell=False,
env=environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
self.out, self.err = self.process.communicate(self.data)
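# Run the command in a worker thread so the timeout can be enforced: if the
# thread is still alive after `timeout` seconds, terminate the process and
# wait for it to finish.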
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
self.returncode = self.process.returncode
return self.out, self.err
class ConnectedCommand(object):
def __init__(self,
process=None,
std_in=None,
std_out=None,
std_err=None):
self._process = process
self._status_code = None
self.std_in = std_in
self.std_out = std_out
self.std_err = std_err
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.kill()
@property
def status_code(self):
"""The status code of the process.
If the code is None, assume that it's still running.
"""
if self._status_code is not None:
return self._status_code
# investigate
return None
@property
def pid(self):
"""The process' PID."""
return self._process.pid
def kill(self):
"""Kills the process."""
return self._process.kill()
def expect(self, bytes, stream=None):
"""Block until given bytes appear in the stream."""
if stream is None:
stream = self.std_out
pass
def send(self, end='\n'):
"""Sends a line to std_in."""
#TODO: Y U LINE BUFFER
pass
def block(self):
"""Blocks until command finishes. Returns Response instance."""
self._status_code = self._process.wait()
class Response(object):
"""A command's response"""
def __init__(self, process=None):
super(Response, self).__init__()
self._process = process
self.command = None
self.std_err = None
self.std_out = None
self.status_code = None
self.history = []
def __repr__(self):
if len(self.command):
return '<Response [{0}]>'.format(self.command[0])
else:
return '<Response>'
def expand_args(command):
"""Parses command strings and returns a Popen-ready list."""
# Prepare arguments.
if isinstance(command, basestring):
splitter = shlex.shlex(command, posix=True)
splitter.whitespace = '|'
splitter.whitespace_split = True
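# Split on '|' first so that each stage of a pipeline becomes its own
# argument list below.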
command = []
while True:
token = splitter.get_token()
if token:
command.append(token)
else:
break
command = map(shlex.split, command)
return command
def run(command, data=None, timeout=None, env=None):
"""Executes a given commmand and returns Response.
Blocks until process is complete, or timeout is reached.
"""
command = expand_args(command)
history = []
for c in command:
if history:
# due to broken pipe problems pass only first 10MB
data = history[-1].std_out[0:10*1024]
cmd = Command(c)
out, err = cmd.run(data, timeout, env)
r = Response(process=cmd)
r.command = c
r.std_out = out
r.std_err = err
r.status_code = cmd.returncode
history.append(r)
r = history.pop()
r.history = history
return r
def connect(command, data=None, env=None):
"""Spawns a new process from the given command."""
# TODO: support piped commands
command_str = expand_args(command).pop()
environ = dict(os.environ)
environ.update(env or {})
process = subprocess.Popen(command_str,
universal_newlines=True,
shell=False,
env=environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
return ConnectedCommand(process=process)
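# Example usage (illustrative sketch of the run()/connect() helpers above):
#
#   r = run('uname -a')
#   print r.status_code, r.std_out
#
#   c = connect('sleep 10')
#   print c.pid
#   c.kill()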
| guidowb/buildpack-python | test/distutils/envoy.py | Python | mit | 4,797 |
import requests
def run(data, settings):
message = data['payload']
key = None
if message.startswith('wfh'):
key = 'wfh'
elif message.startswith('anubis'):
key = 'anu'
else:
return None
url = "http://api.xxiivv.com/?key={0}&cmd=read".format(key)
r = requests.get(url)
data = r.json()
if 'server' in message:
return "There are {} active servers.".format(len(data['servers']))
elif 'player count' in message:
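# Sum the reported player counts across all active servers.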
return "There are {} players.".format(
reduce(
lambda acc, val: acc + int(val['players']),
data['servers'],
0
))
elif 'games' in message:
return "There are {} games.".format(data['activegames'])
| cblgh/tenyks-contrib | src/tenyksscripts/scripts/orihaus_games.py | Python | mit | 770 |
# The number of elements to print for dataset ids/tasks
_print_threshold = 10
def get_print_threshold() -> int:
"""Return the printing threshold for datasets.
The print threshold is the number of elements from ids/tasks to
print when printing representations of `Dataset` objects.
Returns
----------
threshold: int
Number of elements that will be printed
"""
return _print_threshold
def set_print_threshold(threshold: int):
"""Set print threshold
The print threshold is the number of elements from ids/tasks to
print when printing representations of `Dataset` objects.
Parameters
----------
threshold: int
Number of elements to print.
"""
global _print_threshold
_print_threshold = threshold
# If a dataset contains more than this number of elements, it won't
# print any dataset ids
_max_print_size = 1000
def get_max_print_size() -> int:
"""Return the max print size for a dataset.
If a dataset is large, printing `self.ids` as part of a string
representation can be very slow. This field controls the maximum
size for a dataset before ids are no longer printed.
Returns
-------
max_print_size: int
Maximum length of a dataset for ids to be printed in string
representation.
"""
return _max_print_size
def set_max_print_size(max_print_size: int):
"""Set max_print_size
If a dataset is large, printing `self.ids` as part of a string
representation can be very slow. This field controls the maximum
size for a dataset before ids are no longer printed.
Parameters
----------
max_print_size: int
Maximum length of a dataset for ids to be printed in string
representation.
"""
global _max_print_size
_max_print_size = max_print_size
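# Example usage (illustrative sketch of the getters/setters defined above):
#
#   set_print_threshold(20)
#   assert get_print_threshold() == 20
#   set_max_print_size(500)
#   assert get_max_print_size() == 500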
| deepchem/deepchem | deepchem/utils/debug_utils.py | Python | mit | 1,744 |
from google.appengine.ext import ndb
class Account(ndb.Model):
"""
Accounts represent accounts people use on TBA.
"""
# Set by login/registration
# Not editable by the user
email = ndb.StringProperty()
nickname = ndb.StringProperty()
registered = ndb.BooleanProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
# These optional properties are editable by the user
display_name = ndb.StringProperty()
| bvisness/the-blue-alliance | models/account.py | Python | mit | 521 |
from __future__ import unicode_literals
import collections
from django import template
from django.template import VariableDoesNotExist
from six import text_type, string_types
from djangoseo.base import get_metadata, get_linked_metadata
register = template.Library()
class MetadataNode(template.Node):
def __init__(self, metadata_name, variable_name, target, site, language):
self.metadata_name = metadata_name
self.variable_name = variable_name
self.target = template.Variable(target or 'request.path')
self.site = site and template.Variable(site) or None
self.language = language and template.Variable(language) or None
def render(self, context):
try:
target = self.target.resolve(context)
except VariableDoesNotExist:
msg = ("{% get_metadata %} needs some path information.\n"
"Please use RequestContext with the django.core.context_"
"processors.request context processor.\nOr provide a path "
"or object explicitly, eg {% get_metadata for path %} or "
"{% get_metadata for object %}")
raise template.TemplateSyntaxError(msg)
else:
if isinstance(target, collections.Callable):
target = target()
if isinstance(target, string_types):
path = target
elif hasattr(target, 'get_absolute_url'):
path = target.get_absolute_url()
elif hasattr(target, "__iter__") and 'get_absolute_url' in target:
path = target['get_absolute_url']()
else:
path = None
kwargs = {}
# If a site is given, pass that on
if self.site:
kwargs['site'] = self.site.resolve(context)
# If a language is given, pass that on
if self.language:
kwargs['language'] = self.language.resolve(context)
metadata = None
# If the target is a django model object
if hasattr(target, 'pk'):
metadata = get_linked_metadata(target, self.metadata_name, context,
**kwargs)
if not isinstance(path, string_types):
path = None
if not metadata:
# Fetch the metadata
try:
metadata = get_metadata(path, self.metadata_name, context,
**kwargs)
except Exception as e:
raise template.TemplateSyntaxError(e)
# If a variable name is given, store the result there
if self.variable_name is not None:
context.dicts[0][self.variable_name] = metadata
return ''
else:
return text_type(metadata)
def do_get_metadata(parser, token):
"""
Retrieve an object which can produce (and format) metadata.
{% get_metadata [for my_path] [in my_language] [on my_site]
[as my_variable] %}
or if you have multiple metadata classes:
{% get_metadata MyClass [for my_path] [in my_language] [on my_site]
[as my_variable] %}
"""
bits = list(token.split_contents())
tag_name = bits[0]
bits = bits[1:]
metadata_name = None
args = {'as': None, 'for': None, 'in': None, 'on': None}
# If there are an even number of bits,
# a metadata name has been provided.
if len(bits) % 2:
metadata_name = bits[0]
bits = bits[1:]
# The remaining bits are in the form "key value key value ..."
# Valid keys are given in the 'args' dict above.
while len(bits):
if len(bits) < 2 or bits[0] not in args:
raise template.TemplateSyntaxError(
"expected format is '%r [as <variable_name>]'" % tag_name)
key, value, bits = bits[0], bits[1], bits[2:]
args[key] = value
return MetadataNode(
metadata_name,
variable_name=args['as'],
target=args['for'],
site=args['on'],
language=args['in'],
)
register.tag('get_metadata', do_get_metadata)
| romansalin/django-seo | djangoseo/templatetags/seo.py | Python | mit | 4,087 |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from emcee.autocorr import AutocorrError, integrated_time
def get_chain(seed=1234, ndim=3, N=100000):
np.random.seed(seed)
a = 0.9
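# x[i] = a * x[i - 1] + noise is an AR(1) chain; for a = 0.9 its integrated
# autocorrelation time is (1 + a) / (1 - a) = 19, the value the tests below
# compare against.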
x = np.empty((N, ndim))
x[0] = np.zeros(ndim)
for i in range(1, N):
x[i] = x[i - 1] * a + np.random.rand(ndim)
return x
def test_1d(seed=1234, ndim=1, N=250000):
x = get_chain(seed=seed, ndim=ndim, N=N)
tau = integrated_time(x)
assert np.all(np.abs(tau - 19.0) / 19.0 < 0.2)
def test_nd(seed=1234, ndim=3, N=150000):
x = get_chain(seed=seed, ndim=ndim, N=N)
tau = integrated_time(x)
assert np.all(np.abs(tau - 19.0) / 19.0 < 0.2)
def test_too_short(seed=1234, ndim=3, N=100):
x = get_chain(seed=seed, ndim=ndim, N=N)
with pytest.raises(AutocorrError):
integrated_time(x)
tau = integrated_time(x, quiet=True) # NOQA
def test_autocorr_multi_works():
np.random.seed(42)
xs = np.random.randn(16384, 2)
# This throws exception unconditionally in buggy impl's
acls_multi = integrated_time(xs)
acls_single = np.array(
[integrated_time(xs[:, i]) for i in range(xs.shape[1])]
)
assert np.all(np.abs(acls_multi - acls_single) < 2)
| joezuntz/emcee | src/emcee/tests/unit/test_autocorr.py | Python | mit | 1,236 |
__author__ = 'dwae'
| gameduell/dslib | dslib/util/__init__.py | Python | mit | 20 |
"""
Book: Building RESTful Python Web Services
Chapter 3: Improving and adding authentication to an API with Django
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
"""gamesapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
url(r'^', include('games.urls')),
]
| xianjunzhengbackup/code | http/REST/Building-RESTful-Python-Web-Services-master/Chapter 3/restful_python_chapter_03_03/gamesapi/gamesapi/urls.py | Python | mit | 972 |
import unittest
from conans.test.utils.tools import TestServer, TestClient
from conans.model.version import Version
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from collections import OrderedDict
class VersionCheckTest(unittest.TestCase):
def check_versions_test(self):
# Client deprecated
self.servers = {"default": self._get_server(10, 5)}
self.client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]}, client_version=4)
errors = self.client.run("search something -r default", ignore_error=True)
self.assertIn("Your conan's client version is deprecated for the current remote (v10). "
"Upgrade conan client.", self.client.user_io.out)
self.assertTrue(errors) # Not Errors
# Client outdated
self.servers = {"default": self._get_server(10, 4)}
self.client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]}, client_version=4)
errors = self.client.run("search something -r default", ignore_error=False)
self.assertIn(" A new conan version (v10) is available in current remote. Please, "
"upgrade conan client to avoid deprecation.", self.client.user_io.out)
self.assertFalse(errors) # Not Errors
# Client ok
self.servers = {"default": self._get_server(10, 4)}
self.client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]}, client_version=10)
errors = self.client.run("search something -r default", ignore_error=False)
self.assertNotIn("conan client", self.client.user_io.out)
self.assertFalse(errors) # Not Errors
# Server outdated
self.servers = {"default": self._get_server(1, 1)}
self.client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]}, client_version=10,
min_server_compatible_version=1)
errors = self.client.run("search something -r default", ignore_error=True)
self.assertNotIn("The conan remote version is outdated (v1). Please, contact"
" with your system administrator and upgrade the remote to"
" avoid deprecation", self.client.user_io.out)
self.assertFalse(errors) # No Errors
# Server deprecated
self.servers = {"default": self._get_server(1, 1)}
self.client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]}, client_version=10,
min_server_compatible_version=2)
errors = self.client.run("search something -r default", ignore_error=True)
self.assertIn("Your conan's client is incompatible with this remote."
" The server is deprecated. "
"(v1). Please, contact with your system administrator and"
" upgrade the server.",
self.client.user_io.out)
self.assertTrue(errors) # Errors
def check_multi_server_test(self):
# Check what happen if we have 2 servers and one is outdated
# The expected behavior: If we specify the remote with (-r), the command will fail
# if the client for that remote is outdated. If we are looking for a package (not with -r)
# the client will look for the package on each remote.
# Client deprecated for "the_last_server" but OK for "normal_server"
self.servers = OrderedDict([("the_last_server", self._get_server(10, 4)),
("normal_server", self._get_server(4, 2))])
# First upload a package ok with an ok client
tmp_client = TestClient(servers=self.servers,
users={"normal_server": [("lasote", "mypass")],
"the_last_server": [("lasote", "mypass")]},
client_version=4)
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
tmp_client.save(files)
tmp_client.run("export . lasote/stable")
errors = tmp_client.run("upload Hello0/0.1@lasote/stable -r normal_server --all")
errors |= tmp_client.run("upload Hello0/0.1@lasote/stable -r the_last_server --all")
self.assertFalse(errors)
tmp_client.run("remote remove_ref Hello0/0.1@lasote/stable")
# Now with a conflictive client...try to look in servers
self.client = TestClient(servers=self.servers,
users={"normal_server": [("lasote", "mypass")],
"the_last_server": [("lasote", "mypass")]},
client_version=2)
errors = self.client.run("search something -r the_last_server", ignore_error=True)
self.assertIn("Your conan's client version is deprecated for the current remote (v10). "
"Upgrade conan client.", self.client.user_io.out)
self.assertTrue(errors) # Errors
errors = self.client.run("install Hello0/0.1@lasote/stable --build missing",
ignore_error=True)
self.assertIn("Your conan's client version is deprecated for the current remote (v10). "
"Upgrade conan client.", self.client.user_io.out)
self.assertTrue(errors) # Errors! because it fails in the first remote
def _get_server(self, server_version, min_client_compatible_version):
server_version = str(server_version)
min_client_compatible_version = str(min_client_compatible_version)
return TestServer(
[], # write permissions
users={"lasote": "mypass"},
server_version=Version(server_version),
min_client_compatible_version=Version(min_client_compatible_version))
| birsoyo/conan | conans/test/integration/version_check_test.py | Python | mit | 6,100 |
"""
WSGI config for cloudpebble project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "cloudpebble.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudpebble.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| pebble/cloudpebble | cloudpebble/wsgi.py | Python | mit | 1,469 |
from itertools import islice
from math import exp
from math import log
import random
import unittest
from rasmus import stats
from rasmus import util
from rasmus.gnuplot import Gnuplot
from rasmus.testing import make_clean_dir
from rasmus import hmm
#=============================================================================
def make_coin_model(t=.1, e=.9):
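# t is the per-step probability of switching between the two coins; e is the
# probability that a coin shows its characteristic face (heads for state 0,
# tails for state 1).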
def trans(pos1, state1, pos2, state2):
if state1 == state2:
return log(1.0 - t)
else:
return log(t)
def emit(pos, state):
if state == 0:
if random.random() < e:
return "H"
else:
return "T"
elif state == 1:
if random.random() < 1 - e:
return "H"
else:
return "T"
def prob_emission_data(state, data):
if state == 0:
if data == "H":
return log(e)
else:
return log(1-e)
elif state == 1:
if data == "H":
return log(1-e)
else:
return log(e)
model = hmm.HMM()
model.set_callbacks(get_num_states=lambda pos: 2,
prob_prior=lambda pos, state: log(.5),
prob_transition=trans,
emit=emit)
model.prob_emission_data = prob_emission_data
return model
class Test (unittest.TestCase):
def test_coin(self):
"""Test that viterbi and posterior coding work well."""
outdir = 'test/tmp/test_hmm/test_coin/'
make_clean_dir(outdir)
model = make_coin_model()
# sample states
ndata = 100
states = list(islice(hmm.sample_hmm_states(model), ndata))
p = Gnuplot()
p.enableOutput(False)
p.plot(states, style="lines")
# sample data
data = list(hmm.sample_hmm_data(model, states))
# viterbi
model.prob_emission = (lambda pos, state:
model.prob_emission_data(state, data[pos]))
states2 = hmm.viterbi(model, len(data))
# posterior
probs = hmm.get_posterior_probs(model, len(data))
states3 = [exp(probs[i][1]) for i in xrange(len(data))]
# assert that inferences correlates with true state
self.assertTrue(stats.corr(states, states2) > .5)
self.assertTrue(stats.corr(states, states3) > .5)
# plot inference
p.plot(util.vadds(states2, 1.5), style="lines", miny=-1, maxy=4)
p.plot(util.vadds(states3, 2.5), style="lines", miny=-1, maxy=4)
p.enableOutput(True)
p.save(outdir + 'plot.png')
def test_coin_sample_post(self):
"""Test sampling from posterior distribution"""
outdir = 'test/tmp/test_hmm/test_coin_sample_post/'
make_clean_dir(outdir)
model = make_coin_model()
# sample states and data
ndata = 100
states = list(islice(hmm.sample_hmm_states(model), ndata))
data = list(hmm.sample_hmm_data(model, states))
model.prob_emission = (lambda pos, state:
model.prob_emission_data(state, data[pos]))
p = Gnuplot()
p.enableOutput(False)
p.plot(states, style="lines")
probs = hmm.get_posterior_probs(model, len(data))
states2 = [exp(probs[i][1]) for i in xrange(len(data))]
p.plot(util.vadds(states2, 1.5), style="lines", miny=-1, maxy=12)
for i in range(2, 10):
states2 = hmm.sample_posterior(model, ndata)
self.assertTrue(stats.corr(states, states2) > .5)
p.plot(util.vadds(states2, 1.5*i), style="lines", miny=-1, maxy=12)
p.enableOutput(True)
p.save(outdir + 'plot.png')
def test_coin_post(self):
"""Test that posterior decoding."""
model = make_coin_model()
# sample states and data
ndata = 100
states = list(islice(hmm.sample_hmm_states(model), ndata))
data = list(hmm.sample_hmm_data(model, states))
model.prob_emission = (lambda pos, state:
model.prob_emission_data(state, data[pos]))
probs = hmm.get_posterior_probs(model, len(data))
for col in probs:
p = sum(map(exp, col))
self.assertAlmostEqual(p, 1.0)
| abhishekgahlot/compbio | test/rasmus/test_hmm.py | Python | mit | 4,355 |
#
# Chris Lumens <[email protected]>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import warnings
from pykickstart.errors import KickstartDeprecationWarning
from pykickstart.version import FC3
from pykickstart.base import KickstartCommand
from pykickstart.options import KSOptionParser
from pykickstart.i18n import _
class FC3_ZeroMbr(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=110, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.zerombr = kwargs.get("zerombr", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.zerombr:
retval += "# Clear the Master Boot Record\nzerombr\n"
return retval
def _getParser(self):
op = KSOptionParser(prog="zerombr", description="""
If zerombr is specified, any disks whose formatting
is unrecognized are initialized. This will destroy
all of the contents of disks with invalid partition
tables or other formatting unrecognizable to the
installer. It is useful so that the installation
program does not ask if it should initialize the
disk label if installing to a brand new hard drive.
""", version=FC3)
return op
def parse(self, args):
extra = self.op.parse_known_args(args=args, lineno=self.lineno)[1]
if extra:
warnings.warn(_("Ignoring deprecated option on line %s: The zerombr command no longer takes any options. In future releases, this will result in a fatal error from kickstart. Please modify your kickstart file to remove any options.") % self.lineno, KickstartDeprecationWarning)
self.zerombr = True
return self
class F9_ZeroMbr(FC3_ZeroMbr):
removedKeywords = FC3_ZeroMbr.removedKeywords
removedAttrs = FC3_ZeroMbr.removedAttrs
def parse(self, args):
self.op.parse_args(args=args, lineno=self.lineno)
self.zerombr = True
return self
| bcl/pykickstart | pykickstart/commands/zerombr.py | Python | gpl-2.0 | 3,174 |
# -*- coding: utf-8 -*-
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <[email protected]>
# Mike McGrath <[email protected]>
# Toshio Kuratomi <[email protected]>
#
'''Collection of validators for parameters coming to FAS URLs.'''
# Validators don't need an __init__ method (W0232)
# Validators are following an API specification so need methods that otherwise
# would be functions (R0201)
# Validators will usu. only have two methods (R0903)
# pylint: disable-msg=W0232,R0201,R0903
# Disabled inline for specific cases:
# Validators will have a variable "state" that is very seldom used (W0163)
# Validator methods don't really need docstrings since the validator docstring
# pretty much covers it (C0111)
import re
from turbogears import validators, config
from sqlalchemy.exc import InvalidRequestError
from fas.util import available_languages
from fas.model import People, Groups
from sys import modules
try:
import pwquality
except ImportError:
pass
### HACK: TurboGears/FormEncode requires that we use a dummy _ function for
# error messages.
# http://docs.turbogears.org/1.0/Internationalization#id13
def _(s):
return s
class KnownGroup(validators.FancyValidator):
'''Make sure that a group already exists'''
messages = {'no_group': _("The group '%(group)s' does not exist.")}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
try:
# Just make sure the group already exists
# pylint: disable-msg=W0612
group = Groups.by_name(value)
except InvalidRequestError:
raise validators.Invalid(self.message('no_group', state, group=value),
value, state)
class UnknownGroup(validators.FancyValidator):
'''Make sure that a group doesn't already exist'''
messages = {'exists': _("The group '%(group)s' already exists.")}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
try:
# Just make sure the group doesn't already exist
# pylint: disable-msg=W0612
group = Groups.by_name(value)
except InvalidRequestError:
pass
else:
raise validators.Invalid(self.message('exists', state, group=value),
value, state)
class ValidGroupType(validators.FancyValidator):
'''Make sure that a group type is valid'''
messages = {'invalid_type': _('Invalid group type: %(type)s.')}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
if value not in ('system', 'bugzilla', 'cla', 'cvs', 'bzr', 'git', \
'hg', 'mtn', 'svn', 'shell', 'torrent', 'tracker', \
'tracking', 'user', 'pkgdb'):
raise validators.Invalid(self.message('invalid_type', state, type=value),
value, state)
class ValidRoleSort(validators.FancyValidator):
'''Make sure that a role sort key is valid'''
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
if value not in ('username', 'sponsor', 'role_type', 'role_status', \
'creation', 'approval'):
raise validators.Invalid(_("Invalid sort key.") % value,
value, state)
class KnownUser(validators.FancyValidator):
'''Make sure that a user already exists'''
messages = {'no_user': _("'%(user)s' does not exist.")}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
try:
# just prove that we can retrieve a person for the username
# pylint: disable-msg=W0612
people = People.by_username(value)
except InvalidRequestError:
raise validators.Invalid(self.message('no_user', state, user=value),
value, state)
class UnknownUser(validators.FancyValidator):
'''Make sure that a user doesn't already exist'''
messages = {'create_error': _("Error: Could not create - '%(user)s'"),
'exists': _("'%(user)s' already exists.")}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
try:
# just prove that we *cannot* retrieve a person for the username
# pylint: disable-msg=W0612
people = People.by_username(value)
except InvalidRequestError:
return
except:
raise validators.Invalid(self.message('create_error', state, user=value),
value, state)
raise validators.Invalid(self.message('exists', state, user=value),
value, state)
class NonFedoraEmail(validators.FancyValidator):
'''Make sure that an email address is not @fedoraproject.org'''
messages = {'no_loop': _('To prevent email loops, your email address'
' cannot be @fedoraproject.org.')}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
if value.endswith('@fedoraproject.org'):
raise validators.Invalid(self.message('no_loop', state), value, state)
class MaybeFloat(validators.FancyValidator):
''' Make sure the float value is a valid float value (or None) '''
messages = {'no_float': _('Error - Not a valid float value: %(value)s')}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
if value is None:
return None
else:
return float(value)
def validate_python(self, value, state):
if value is None:
return
try:
float(value)
except:
raise validators.Invalid(self.message('no_float', state,
value=value), value, state)
class ValidGPGKeyID(validators.UnicodeString):
''' Ensure that the GPG key id is a hex number, maybe containing spaces.
'''
messages = {'invalid_key':
_('Error - Invalid character in GPG key id: %(char)s')}
def validate_python(self, value, state):
VALID_CHARS = "0123456789abcdefABCDEF "
for char in value:
if char not in VALID_CHARS:
raise validators.Invalid(self.message('invalid_key',
state, char=char),
value, state)
class ValidSSHKey(validators.FancyValidator):
''' Make sure the ssh key uploaded is valid '''
messages = {'invalid_key': _('Error - Not a valid RSA SSH key: %(key)s')}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.file.read().decode('utf-8')
def validate_python(self, value, state):
# pylint: disable-msg=C0111
# value = value.file.read()
keylines = value.split('\n')
for keyline in keylines:
if not keyline:
continue
keyline = keyline.strip()
validline = re.match('^(rsa|ssh-rsa) [ \t]*[^ \t]+.*$', keyline)
if not validline:
raise validators.Invalid(self.message('invalid_key', state,
key=keyline), value, state)
class ValidUsername(validators.FancyValidator):
'''Make sure that a username is legal and isn't blacklisted'''
username_regex = re.compile(r'^[a-z][a-z0-9]+$')
username_blacklist = config.get('username_blacklist').split(',')
messages = {'invalid_username': _("'%(username)s' is an illegal username. "
"A valid username must be ASCII, only contain lowercase alphanumeric "
"characters, and must start with a letter."),
'blacklist': _("'%(username)s' is an blacklisted username. Please "
"choose a different one.")}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
if not self.username_regex.match(value):
raise validators.Invalid(self.message('invalid_username', state,
username=value), value, state)
if value in self.username_blacklist:
raise validators.Invalid(self.message('blacklist', state, username=value),
value, state)
class ValidLanguage(validators.FancyValidator):
'''Make sure that a language is available'''
messages = {'not_available': _("The language '%(lang)s' is not available.")}
def _to_python(self, value, state):
# pylint: disable-msg=C0111,W0613
return value.strip()
def validate_python(self, value, state):
# pylint: disable-msg=C0111
if value not in available_languages() + ['C']:
raise validators.Invalid(self.message('not_available', state, lang=value),
value, state)
class PasswordStrength(validators.UnicodeString):
'''Make sure that a password meets our strength requirements'''
messages = {'strength': _('Passwords must meet certain strength requirements. If they have a mix of symbols, upper and lowercase letters, and digits they must be at least 9 characters. If they have a mix of upper and lowercase letters and digits they must be at least 10 characters. If they have lowercase letters and digits, they must be at least 12 characters. Letters alone need to have at least 3 different characters and be 20 or more characters in length.'),
'xkcd': _('Malicious hackers read xkcd, you know'),
'pwquality': _(r'libpwquality reports this is a weak password: %(pwq_msg)s'),}
def validate_python(self, value, state):
# http://xkcd.com/936/
if value.lower() in (u'correct horse battery staple',
u'correcthorsebatterystaple', u'tr0ub4dor&3'):
raise validators.Invalid(self.message('xkcd', state), value, state)
if "pwquality" in modules:
try:
pw_quality = pwquality.PWQSettings()
pw_quality.read_config()
pw_quality.check(value, None, None)
except pwquality.PWQError as (e, msg):
raise validators.Invalid(self.message('pwquality', state) % {'pwq_msg': msg}, value, state)
diversity = set(value)
if len(diversity) < 2:
raise validators.Invalid(self.message('strength', state),
value, state)
length = len(value)
if length >= 20:
return
if length < 9:
raise validators.Invalid(self.message('strength', state),
value, state)
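# Classify the characters, then require a minimum length that depends on the
# mix of character classes: upper, lower, digits and symbols together need 9+;
# upper and lower plus digits or symbols need 10+; letters plus digits or
# symbols need 12+; anything with less variety must be 20+ characters
# (already handled above).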
lower = upper = digit = space = symbol = False
for c in value:
if c.isalpha():
if c.islower():
lower = True
else:
upper = True
elif c.isdigit():
digit = True
elif c.isspace():
space = True
else:
symbol = True
if upper and lower and digit and symbol:
if length >= 9:
return
elif upper and lower and (digit or symbol):
if length >= 10:
return
elif (lower or upper) and (digit or symbol):
if length >= 12:
return
raise validators.Invalid(self.message('strength', state), value, state)
class ValidHumanWithOverride(validators.FancyValidator):
messages = { 'initial': _('You must include the full form of your names, not just initials. If your fullname really has one letter portions, you may check the override checkbox to submit this name.')}
def __init__(self, name_field, override_field):
super(validators.FancyValidator, self).__init__()
self.name_field = name_field
self.override = override_field
def validate_python(self, values, state):
errors = {}
# If override is set, then we skip the rest of testing
if values.get(self.override, False):
return
# Check for initials, only first or last name etc.
name = values.get(self.name_field)
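# Roughly: the first and last whitespace-separated parts of the name must each
# be at least two characters long, i.e. bare initials are rejected.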
name_regex = re.compile(r'^\S{2}\S*\b.*\b\S{2}\S*$', flags=re.UNICODE)
if not name_regex.match ( name ):
errors[self.name_field] = self.message('initial', state)
# raise errors
if errors:
error_list = errors.items()
error_list.sort()
error_message = '<br>\n'.join(['%s: %s' % (name, values) for name, values in error_list])
raise validators.Invalid(error_message, values, state, error_dict=errors)
| bstinsonmhk/fas | fas/validators.py | Python | gpl-2.0 | 14,256 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from PyQt4 import QtCore, QtGui
from picard import config
from picard.ui.options import OptionsPage, register_options_page
from picard.ui.ui_options_tags import Ui_TagsOptionsPage
from picard.util.tags import TAG_NAMES
class TagsOptionsPage(OptionsPage):
NAME = "tags"
TITLE = N_("Tags")
PARENT = None
SORT_ORDER = 30
ACTIVE = True
options = [
config.BoolOption("setting", "clear_existing_tags", False),
config.TextOption("setting", "preserved_tags", ""),
config.BoolOption("setting", "write_id3v1", True),
config.BoolOption("setting", "write_id3v23", True),
config.TextOption("setting", "id3v2_encoding", "utf-16"),
config.TextOption("setting", "id3v23_join_with", "/"),
config.BoolOption("setting", "remove_id3_from_flac", False),
config.BoolOption("setting", "remove_ape_from_mp3", False),
config.BoolOption("setting", "tpe2_albumartist", False),
config.BoolOption("setting", "dont_write_tags", False),
config.BoolOption("setting", "preserve_timestamps", False),
]
def __init__(self, parent=None):
super(TagsOptionsPage, self).__init__(parent)
self.ui = Ui_TagsOptionsPage()
self.ui.setupUi(self)
self.ui.write_id3v23.clicked.connect(self.update_encodings)
self.ui.write_id3v24.clicked.connect(self.update_encodings)
self.completer = QtGui.QCompleter(sorted(TAG_NAMES.keys()), self)
self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.completer.setWidget(self.ui.preserved_tags)
self.ui.preserved_tags.textEdited.connect(self.preserved_tags_edited)
self.completer.activated.connect(self.completer_activated)
def load(self):
self.ui.write_tags.setChecked(not config.setting["dont_write_tags"])
self.ui.preserve_timestamps.setChecked(config.setting["preserve_timestamps"])
self.ui.clear_existing_tags.setChecked(config.setting["clear_existing_tags"])
self.ui.write_id3v1.setChecked(config.setting["write_id3v1"])
self.ui.write_id3v23.setChecked(config.setting["write_id3v23"])
if config.setting["id3v2_encoding"] == "iso-8859-1":
self.ui.enc_iso88591.setChecked(True)
elif config.setting["id3v2_encoding"] == "utf-16":
self.ui.enc_utf16.setChecked(True)
else:
self.ui.enc_utf8.setChecked(True)
self.ui.id3v23_join_with.setEditText(config.setting["id3v23_join_with"])
self.ui.remove_ape_from_mp3.setChecked(config.setting["remove_ape_from_mp3"])
self.ui.remove_id3_from_flac.setChecked(config.setting["remove_id3_from_flac"])
self.ui.preserved_tags.setText(config.setting["preserved_tags"])
self.update_encodings()
def save(self):
config.setting["dont_write_tags"] = not self.ui.write_tags.isChecked()
config.setting["preserve_timestamps"] = self.ui.preserve_timestamps.isChecked()
clear_existing_tags = self.ui.clear_existing_tags.isChecked()
if clear_existing_tags != config.setting["clear_existing_tags"]:
config.setting["clear_existing_tags"] = clear_existing_tags
self.tagger.window.metadata_box.update()
config.setting["write_id3v1"] = self.ui.write_id3v1.isChecked()
config.setting["write_id3v23"] = self.ui.write_id3v23.isChecked()
config.setting["id3v23_join_with"] = unicode(self.ui.id3v23_join_with.currentText())
if self.ui.enc_iso88591.isChecked():
config.setting["id3v2_encoding"] = "iso-8859-1"
elif self.ui.enc_utf16.isChecked():
config.setting["id3v2_encoding"] = "utf-16"
else:
config.setting["id3v2_encoding"] = "utf-8"
config.setting["remove_ape_from_mp3"] = self.ui.remove_ape_from_mp3.isChecked()
config.setting["remove_id3_from_flac"] = self.ui.remove_id3_from_flac.isChecked()
config.setting["preserved_tags"] = unicode(self.ui.preserved_tags.text())
self.tagger.window.enable_tag_saving_action.setChecked(not config.setting["dont_write_tags"])
def update_encodings(self):
if self.ui.write_id3v23.isChecked():
if self.ui.enc_utf8.isChecked():
self.ui.enc_utf16.setChecked(True)
self.ui.enc_utf8.setEnabled(False)
self.ui.label_id3v23_join_with.setEnabled(True)
self.ui.id3v23_join_with.setEnabled(True)
else:
self.ui.enc_utf8.setEnabled(True)
self.ui.label_id3v23_join_with.setEnabled(False)
self.ui.id3v23_join_with.setEnabled(False)
def preserved_tags_edited(self, text):
prefix = unicode(text)[:self.ui.preserved_tags.cursorPosition()].split(",")[-1]
self.completer.setCompletionPrefix(prefix)
if prefix:
self.completer.complete()
else:
self.completer.popup().hide()
def completer_activated(self, text):
input = self.ui.preserved_tags
current = unicode(input.text())
i = input.cursorPosition()
p = len(self.completer.completionPrefix())
input.setText("%s%s %s" % (current[:i - p], text, current[i:]))
input.setCursorPosition(i - p + len(text) + 1)
register_options_page(TagsOptionsPage)
| dufferzafar/picard | picard/ui/options/tags.py | Python | gpl-2.0 | 6,114 |
#
# yuminstall.py
#
# Copyright (C) 2005, 2006, 2007 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from flags import flags
from errors import *
import sys
import os
import os.path
import shutil
import time
import warnings
import types
import locale
import glob
import tempfile
import itertools
import re
import anaconda_log
import rpm
import rpmUtils
import urlgrabber.progress
import urlgrabber.grabber
from urlgrabber.grabber import URLGrabber, URLGrabError
import yum
import iniparse
from yum.constants import *
from yum.Errors import *
from yum.misc import to_unicode
from yum.yumRepo import YumRepository
from backend import AnacondaBackend
from product import *
from sortedtransaction import SplitMediaTransactionData
from constants import *
from image import *
from compssort import *
import packages
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
P_ = lambda x, y, z: gettext.ldngettext("anaconda", x, y, z)
import network
# specspo stuff
rpm.addMacro("_i18ndomains", "redhat-dist")
import logging
log = logging.getLogger("anaconda")
import urlparse
urlparse.uses_fragment.append('media')
urlgrabber.grabber.default_grabber.opts.user_agent = "%s (anaconda)/%s" %(productName, productVersion)
import iutil
import isys
def size_string (size):
def number_format(s):
return locale.format("%s", s, 1)
retval = None
if size > 1024 * 1024:
size = size / (1024*1024)
retval = _("%s MB") %(number_format(size),)
elif size > 1024:
size = size / 1024
retval = _("%s KB") %(number_format(size),)
else:
retval = P_("%s Byte", "%s Bytes", size) % (number_format(size),)
return to_unicode(retval)
class AnacondaCallback:
def __init__(self, ayum, anaconda, instLog, modeText):
self.repos = ayum.repos
self.ts = ayum.ts
self.ayum = ayum
self.messageWindow = anaconda.intf.messageWindow
self.pulseWindow = anaconda.intf.progressWindow
self.progress = anaconda.intf.instProgress
self.progressWindowClass = anaconda.intf.progressWindow
self.rootPath = anaconda.rootPath
self.initWindow = None
self.progressWindow = None
self.lastprogress = 0
self.incr = 20
self.instLog = instLog
self.modeText = modeText
self.openfile = None
self.inProgressPo = None
def setSizes(self, numpkgs, totalSize, totalFiles):
self.numpkgs = numpkgs
self.totalSize = totalSize
self.totalFiles = totalFiles
self.donepkgs = 0
self.doneSize = 0
self.doneFiles = 0
def callback(self, what, amount, total, h, user):
if what == rpm.RPMCALLBACK_TRANS_START:
# step 6 is the bulk of the ts processing time
if amount == 6:
self.progressWindow = \
self.progressWindowClass (_("Preparing to install"),
_("Preparing transaction from installation source"),
total)
self.incr = total / 10
if what == rpm.RPMCALLBACK_TRANS_PROGRESS:
if self.progressWindow and amount > self.lastprogress + self.incr:
self.progressWindow.set(amount)
self.lastprogress = amount
if what == rpm.RPMCALLBACK_TRANS_STOP and self.progressWindow:
self.progressWindow.pop()
if what == rpm.RPMCALLBACK_INST_OPEN_FILE:
(hdr, rpmloc) = h
# hate hate hate at epochs...
epoch = hdr['epoch']
if epoch is not None:
epoch = str(epoch)
txmbrs = self.ayum.tsInfo.matchNaevr(hdr['name'], hdr['arch'],
epoch, hdr['version'],
hdr['release'])
if len(txmbrs) == 0:
raise RuntimeError, "Unable to find package %s-%s-%s.%s" %(hdr['name'], hdr['version'], hdr['release'], hdr['arch'])
po = txmbrs[0].po
repo = self.repos.getRepo(po.repoid)
pkgStr = "%s-%s-%s.%s" % (po.name, po.version, po.release, po.arch)
s = to_unicode(_("<b>Installing %(pkgStr)s</b> (%(size)s)\n")) \
% {'pkgStr': pkgStr, 'size': size_string(hdr['size'])}
summary = to_unicode(gettext.ldgettext("redhat-dist", hdr['summary'] or ""))
s += summary.strip()
self.progress.set_label(s)
self.instLog.write(self.modeText % str(pkgStr))
self.instLog.flush()
self.openfile = None
while self.openfile is None:
try:
fn = repo.getPackage(po)
f = open(fn, 'r')
self.openfile = f
except yum.Errors.NoMoreMirrorsRepoError:
self.ayum._handleFailure(po)
except IOError:
self.ayum._handleFailure(po)
except yum.Errors.RepoError, e:
continue
self.inProgressPo = po
return self.openfile.fileno()
elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
if self.initWindow:
self.initWindow.pop()
self.initWindow = None
(hdr, rpmloc) = h
fn = self.openfile.name
self.openfile.close()
self.openfile = None
if os.path.dirname(fn).startswith("%s/var/cache/yum/" % self.rootPath):
try:
os.unlink(fn)
except OSError as e:
log.debug("unable to remove file %s" %(e.strerror,))
self.donepkgs += 1
self.doneSize += self.inProgressPo.returnSimple("installedsize") / 1024.0
self.doneFiles += len(hdr[rpm.RPMTAG_BASENAMES])
if self.donepkgs <= self.numpkgs:
self.progress.set_text(P_("Packages completed: "
"%(donepkgs)d of %(numpkgs)d",
"Packages completed: "
"%(donepkgs)d of %(numpkgs)d",
self.numpkgs)
% {'donepkgs': self.donepkgs,
'numpkgs': self.numpkgs})
self.progress.set_fraction(float(self.doneSize / self.totalSize))
self.progress.processEvents()
self.inProgressPo = None
elif what in (rpm.RPMCALLBACK_UNINST_START,
rpm.RPMCALLBACK_UNINST_STOP):
if self.initWindow is None:
self.initWindow = self.pulseWindow(_("Finishing upgrade"),
_("Finishing upgrade process. This may take a little while."),
0, pulse=True)
else:
self.initWindow.pulse()
elif what in (rpm.RPMCALLBACK_CPIO_ERROR,
rpm.RPMCALLBACK_UNPACK_ERROR,
rpm.RPMCALLBACK_SCRIPT_ERROR):
if not isinstance(h, types.TupleType):
h = (h, None)
(hdr, rpmloc) = h
# If this is a cleanup/remove, then hdr is a string not a header.
if isinstance(hdr, rpm.hdr):
name = hdr['name']
else:
name = hdr
# Script errors store whether or not they're fatal in "total". So,
# we should only error out for fatal script errors or the cpio and
# unpack problems.
if what != rpm.RPMCALLBACK_SCRIPT_ERROR or total:
self.messageWindow(_("Error Installing Package"),
_("A fatal error occurred when installing the %s "
"package. This could indicate errors when reading "
"the installation media. Installation cannot "
"continue.") % name,
type="custom", custom_icon="error",
custom_buttons=[_("_Exit installer")])
sys.exit(1)
if self.initWindow is None:
self.progress.processEvents()
class AnacondaYumRepo(YumRepository):
def __init__(self, *args, **kwargs):
YumRepository.__init__(self, *args, **kwargs)
self.enablegroups = True
self._anacondaBaseURLs = []
def needsNetwork(self):
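        # A repo needs the network when any of its base URLs or its
        # mirrorlist points at a remote (http/ftp) location.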
def _isURL(s):
return s.startswith("http") or s.startswith("ftp")
if len(self.baseurl) > 0:
return len(filter(lambda s: _isURL(s), self.baseurl)) > 0
elif self.mirrorlist:
return _isURL(self.mirrorlist)
else:
return False
def dirCleanup(self):
cachedir = self.getAttribute('cachedir')
if os.path.isdir(cachedir):
if not self.needsNetwork() or self.name == "Installation Repo":
shutil.rmtree(cachedir)
else:
if os.path.exists("%s/headers" % cachedir):
shutil.rmtree("%s/headers" % cachedir)
if os.path.exists("%s/packages" % cachedir):
shutil.rmtree("%s/packages" % cachedir)
# needed to store nfs: repo url that yum doesn't know
def _getAnacondaBaseURLs(self):
return self._anacondaBaseURLs or self.baseurl or [self.mirrorlist]
def _setAnacondaBaseURLs(self, value):
self._anacondaBaseURLs = value
anacondaBaseURLs = property(_getAnacondaBaseURLs, _setAnacondaBaseURLs,
doc="Extends AnacondaYum.baseurl to store non-yum urls:")
class YumSorter(yum.YumBase):
def _transactionDataFactory(self):
return SplitMediaTransactionData()
class AnacondaYum(YumSorter):
def __init__(self, anaconda):
YumSorter.__init__(self)
self.anaconda = anaconda
self._timestamp = None
self.repoIDcounter = itertools.count()
# Only needed for hard drive and nfsiso installs.
self._discImages = {}
self.isodir = None
# Only needed for media installs.
self.currentMedia = None
self.mediagrabber = None
# Where is the source media mounted? This is the directory
# where Packages/ is located.
self.tree = "/mnt/source"
self.macros = {}
if flags.selinux:
for directory in ("/tmp/updates",
"/etc/selinux/targeted/contexts/files",
"/etc/security/selinux/src/policy/file_contexts",
"/etc/security/selinux"):
fn = "%s/file_contexts" %(directory,)
if os.access(fn, os.R_OK):
break
self.macros["__file_context_path"] = fn
else:
self.macros["__file_context_path"] = "%{nil}"
self.updates = []
self.localPackages = []
def setup(self):
# yum doesn't understand all our method URLs, so use this for all
# except FTP and HTTP installs.
self._baseRepoURL = "file://%s" % self.tree
while True:
try:
self.configBaseURL()
break
except SystemError, e:
self.anaconda.intf.messageWindow(_("Error Setting Up Repository"),
_("The following error occurred while setting up the "
"installation repository:\n\n%(e)s\n\nPlease provide the "
"correct information for installing %(productName)s.")
% {'e': e, 'productName': productName})
self.anaconda.methodstr = self.anaconda.intf.methodstrRepoWindow(self.anaconda.methodstr or "cdrom:")
self.doConfigSetup(root=self.anaconda.rootPath)
self.conf.installonlypkgs = []
def _switchCD(self, discnum):
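        # Prompt for and mount the requested physical disc, verifying its
        # .discinfo timestamp against the one recorded for this install tree.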
if os.access("%s/.discinfo" % self.tree, os.R_OK):
f = open("%s/.discinfo" % self.tree)
self._timestamp = f.readline().strip()
f.close()
dev = self.anaconda.storage.devicetree.getDeviceByName(self.anaconda.mediaDevice)
dev.format.mountpoint = self.tree
# If self.currentMedia is None, then there shouldn't be anything
# mounted. Before going further, see if the correct disc is already
# in the drive. This saves a useless eject and insert if the user
# has for some reason already put the disc in the drive.
if self.currentMedia is None:
try:
dev.format.mount()
if verifyMedia(self.tree, discnum, None):
self.currentMedia = discnum
return
dev.format.unmount()
except:
pass
else:
unmountCD(dev, self.anaconda.intf.messageWindow)
self.currentMedia = None
dev.eject()
while True:
if self.anaconda.intf:
self.anaconda.intf.beep()
self.anaconda.intf.messageWindow(_("Change Disc"),
_("Please insert %(productName)s disc %(discnum)d to continue.")
% {'productName': productName, 'discnum': discnum})
try:
dev.format.mount()
if verifyMedia(self.tree, discnum, self._timestamp):
self.currentMedia = discnum
break
self.anaconda.intf.messageWindow(_("Wrong Disc"),
_("That's not the correct %s disc.")
% (productName,))
dev.format.unmount()
dev.eject()
except:
self.anaconda.intf.messageWindow(_("Error"),
_("Unable to access the disc."))
def _switchImage(self, discnum):
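        # Unmount the currently mounted ISO image and mount the image that
        # contains the requested disc number instead.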
umountImage(self.tree, self.currentMedia)
self.currentMedia = None
# mountDirectory checks before doing anything, so it's safe to
# call this repeatedly.
mountDirectory(self.anaconda.methodstr,
self.anaconda.intf.messageWindow)
self._discImages = mountImage(self.isodir, self.tree, discnum,
self.anaconda.intf.messageWindow,
discImages=self._discImages)
self.currentMedia = discnum
def configBaseURL(self):
# We only have a methodstr if method= or repo= was passed to
# anaconda. No source for this base repo (the CD media, NFS,
# whatever) is mounted yet since loader only mounts the source
# for the stage2 image. We need to set up the source mount
# now.
if flags.cmdline.has_key("preupgrade"):
path = "/var/cache/yum/preupgrade"
self.anaconda.methodstr = "hd::%s" % path
self._baseRepoURL = "file:///mnt/sysimage/%s" % path
elif self.anaconda.methodstr:
m = self.anaconda.methodstr
if m.startswith("hd:"):
if m.count(":") == 2:
(device, path) = m[3:].split(":")
else:
(device, fstype, path) = m[3:].split(":")
self.isodir = "/mnt/isodir/%s" % path
# This takes care of mounting /mnt/isodir first.
self._switchImage(1)
self.mediagrabber = self.mediaHandler
elif m.startswith("nfsiso:"):
self.isodir = "/mnt/isodir"
# Calling _switchImage takes care of mounting /mnt/isodir first.
if not network.hasActiveNetDev():
if not self.anaconda.intf.enableNetwork():
self._baseRepoURL = None
return
urlgrabber.grabber.reset_curl_obj()
self._switchImage(1)
self.mediagrabber = self.mediaHandler
elif m.startswith("http") or m.startswith("ftp:"):
self._baseRepoURL = m
elif m.startswith("nfs:"):
if not network.hasActiveNetDev():
if not self.anaconda.intf.enableNetwork():
self._baseRepoURL = None
urlgrabber.grabber.reset_curl_obj()
(opts, server, path) = iutil.parseNfsUrl(m)
isys.mount(server+":"+path, self.tree, "nfs", options=opts)
# This really should be fixed in loader instead but for now see
# if there's images and if so go with this being an NFSISO
# install instead.
images = findIsoImages(self.tree, self.anaconda.intf.messageWindow)
if images != {}:
isys.umount(self.tree, removeDir=False)
self.anaconda.methodstr = "nfsiso:%s" % m[4:]
self.configBaseURL()
return
elif m.startswith("cdrom:"):
self._switchCD(1)
self.mediagrabber = self.mediaHandler
self._baseRepoURL = "file://%s" % self.tree
else:
# No methodstr was given. In order to find an installation source,
# we should first check to see if there's a CD/DVD with packages
# on it, and then default to the mirrorlist URL. The user can
# always change the repo with the repo editor later.
cdr = scanForMedia(self.tree, self.anaconda.storage)
if cdr:
self.mediagrabber = self.mediaHandler
self.anaconda.mediaDevice = cdr
self.currentMedia = 1
log.info("found installation media on %s" % cdr)
else:
# No CD with media on it and no repo=/method= parameter, so
# default to using whatever's enabled in /etc/yum.repos.d/
self._baseRepoURL = None
def configBaseRepo(self, root='/'):
# Create the "base" repo object, assuming there is one. Otherwise we
# just skip all this and use the defaults from /etc/yum.repos.d.
if not self._baseRepoURL:
return
# add default repos
anacondabaseurl = (self.anaconda.methodstr or
"cdrom:%s" % (self.anaconda.mediaDevice))
anacondabasepaths = self.anaconda.instClass.getPackagePaths(anacondabaseurl)
for (name, uri) in self.anaconda.instClass.getPackagePaths(self._baseRepoURL).items():
rid = name.replace(" ", "")
repo = AnacondaYumRepo("anaconda-%s-%s" % (rid, productStamp))
repo.baseurl = uri
repo.anacondaBaseURLs = anacondabasepaths[name]
repo.name = name
repo.cost = 100
if self.anaconda.mediaDevice or self.isodir:
repo.mediaid = getMediaId(self.tree)
log.info("set mediaid of repo %s to: %s" % (rid, repo.mediaid))
repo.enable()
self.repos.add(repo)
def mediaHandler(self, *args, **kwargs):
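        # Grabber callback used for media installs: switch to the disc or ISO
        # image holding the requested package, then fetch it from self.tree.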
mediaid = kwargs["mediaid"]
discnum = kwargs["discnum"]
relative = kwargs["relative"]
# The package exists on media other than what's mounted right now.
if discnum != self.currentMedia:
log.info("switching from media #%s to #%s for %s" %
(self.currentMedia, discnum, relative))
# Unmount any currently mounted ISO images and mount the one
# containing the requested packages.
if self.isodir:
self._switchImage(discnum)
else:
self._switchCD(discnum)
ug = URLGrabber(checkfunc=kwargs["checkfunc"])
ug.urlgrab("%s/%s" % (self.tree, kwargs["relative"]), kwargs["local"],
text=kwargs["text"], range=kwargs["range"], copy_local=1)
return kwargs["local"]
# XXX: This is straight out of yum, but we need to override it here in
# order to use our own repo class.
def readRepoConfig(self, parser, section):
'''Parse an INI file section for a repository.
@param parser: ConfParser or similar to read INI file values from.
@param section: INI file section to read.
@return: YumRepository instance.
'''
repo = AnacondaYumRepo(section)
repo.populate(parser, section, self.conf)
# Ensure that the repo name is set
if not repo.name:
repo.name = section
self.logger.error(_('Repository %r is missing name in configuration, '
'using id') % section)
# Set attributes not from the config file
repo.yumvar.update(self.conf.yumvar)
repo.cfg = parser
if "-source" in repo.id or "-debuginfo" in repo.id:
name = repo.name
del(repo)
raise RepoError, "Repo %s contains -source or -debuginfo, excluding" % name
# this is a little hard-coded, but it's effective
if not BETANAG and ("rawhide" in repo.id or "development" in repo.id):
name = repo.name
del(repo)
raise RepoError, "Excluding devel repo %s for non-devel anaconda" % name
if BETANAG and not repo.enabled:
name = repo.name
del(repo)
raise RepoError, "Excluding disabled repo %s for prerelease" % name
# If repo=/method= was passed in, we want to default these extra
# repos to off.
if self._baseRepoURL:
repo.enabled = False
return repo
# We need to make sure $releasever gets set up before .repo files are
# read. Since there's no redhat-release package in /mnt/sysimage (and
    # won't be for quite a while), we need to do our own substitution.
def _getReleasever(self):
from ConfigParser import ConfigParser
c = ConfigParser()
try:
if os.access("%s/.treeinfo" % self.anaconda.methodstr, os.R_OK):
ConfigParser.read(c, "%s/.treeinfo" % self.anaconda.methodstr)
else:
ug = URLGrabber()
ug.urlgrab("%s/.treeinfo" % self.anaconda.methodstr,
"/tmp/.treeinfo", copy_local=1)
ConfigParser.read(c, "/tmp/.treeinfo")
return c.get("general", "version")
except:
return productVersion
# Override this method so yum doesn't nuke our existing logging config.
def doLoggingSetup(self, *args, **kwargs):
import yum.logginglevels
file_handler = logging.FileHandler("/tmp/yum.log")
file_formatter = logging.Formatter("[%(asctime)s] %(levelname)-8s: %(message)s")
file_handler.setFormatter(file_formatter)
tty3_handler = logging.FileHandler("/dev/tty3")
tty3_formatter = logging.Formatter(anaconda_log.TTY_FORMAT,
anaconda_log.DATE_FORMAT)
tty3_handler.setFormatter(tty3_formatter)
verbose = logging.getLogger("yum.verbose")
verbose.setLevel(logging.DEBUG)
verbose.propagate = False
verbose.addHandler(file_handler)
logger = logging.getLogger("yum")
logger.propagate = False
logger.setLevel(yum.logginglevels.INFO_2)
logger.addHandler(file_handler)
anaconda_log.autoSetLevel(tty3_handler, True)
tty3_handler.setLevel(anaconda_log.logger.tty_loglevel)
logger.addHandler(tty3_handler)
# XXX filelogger is set in setFileLog - do we or user want it?
filelogger = logging.getLogger("yum.filelogging")
filelogger.setLevel(logging.INFO)
filelogger.propagate = False
def doConfigSetup(self, fn='/tmp/anaconda-yum.conf', root='/'):
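        # Newer yum exposes a preconf object for early configuration; fall
        # back to passing the arguments to _getConfig() on older versions.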
if hasattr(self, "preconf"):
self.preconf.fn = fn
self.preconf.root = root
self.preconf.releasever = self._getReleasever()
self.preconf.enabled_plugins = ["whiteout", "blacklist"]
YumSorter._getConfig(self)
else:
YumSorter._getConfig(self, fn=fn, root=root,
enabled_plugins=["whiteout", "blacklist"])
self.configBaseRepo(root=root)
extraRepos = []
ddArch = os.uname()[4]
#Add the Driver disc repos to Yum
for d in glob.glob(DD_RPMS):
dirname = os.path.basename(d)
rid = "anaconda-%s" % dirname
repo = AnacondaYumRepo(rid)
repo.baseurl = [ "file:///%s" % d ]
repo.name = "Driver Disk %s" % dirname.split("-")[1]
repo.enable()
extraRepos.append(repo)
if self.anaconda.ksdata:
            # This is the same pattern as from loader/urls.c:splitProxyParam,
            # with the POSIX character classes translated to explicit ranges
            # since Python's re module does not support them.
            pattern = re.compile(r"([A-Za-z]+://)?(([A-Za-z0-9]+)(:[^:@]+)?@)?([^:]+)(:[0-9]+)?(/.*)?")
for ksrepo in self.anaconda.ksdata.repo.repoList:
anacondaBaseURLs = [ksrepo.baseurl]
# yum doesn't understand nfs:// and doesn't want to. We need
# to first do the mount, then translate it into a file:// that
# yum does understand.
# "nfs:" and "nfs://" prefixes are accepted in ks repo --baseurl
if ksrepo.baseurl and ksrepo.baseurl.startswith("nfs:"):
if not network.hasActiveNetDev() and not self.anaconda.intf.enableNetwork():
self.anaconda.intf.messageWindow(_("No Network Available"),
_("Some of your software repositories require "
"networking, but there was an error enabling the "
"network on your system."),
type="custom", custom_icon="error",
custom_buttons=[_("_Exit installer")])
sys.exit(1)
urlgrabber.grabber.reset_curl_obj()
dest = tempfile.mkdtemp("", ksrepo.name.replace(" ", ""), "/mnt")
# handle "nfs://" prefix
if ksrepo.baseurl[4:6] == '//':
ksrepo.baseurl = ksrepo.baseurl.replace('//', '', 1)
anacondaBaseURLs = [ksrepo.baseurl]
try:
isys.mount(ksrepo.baseurl[4:], dest, "nfs")
except Exception as e:
log.error("error mounting NFS repo: %s" % e)
ksrepo.baseurl = "file://%s" % dest
repo = AnacondaYumRepo(ksrepo.name)
repo.mirrorlist = ksrepo.mirrorlist
repo.name = ksrepo.name
if not ksrepo.baseurl:
repo.baseurl = []
else:
repo.baseurl = [ ksrepo.baseurl ]
repo.anacondaBaseURLs = anacondaBaseURLs
if ksrepo.cost:
repo.cost = ksrepo.cost
if ksrepo.excludepkgs:
repo.exclude = ksrepo.excludepkgs
if ksrepo.includepkgs:
repo.include = ksrepo.includepkgs
if ksrepo.proxy:
m = pattern.match(ksrepo.proxy)
if m and m.group(5):
# If both a host and port was found, just paste them
# together using the colon at the beginning of the port
# match as a separator. Otherwise, just use the host.
if m.group(6):
repo.proxy = m.group(5) + m.group(6)
else:
repo.proxy = m.group(5)
# yum also requires a protocol. If none was given,
# default to http.
if m.group(1):
repo.proxy = m.group(1) + repo.proxy
else:
repo.proxy = "http://" + repo.proxy
if m and m.group(3):
repo.proxy_username = m.group(3)
if m and m.group(4):
# Skip the leading colon.
repo.proxy_password = m.group(4)[1:]
repo.enable()
extraRepos.append(repo)
for repo in extraRepos:
try:
self.repos.add(repo)
log.info("added repository %s with URL %s" % (repo.name, repo.mirrorlist or repo.baseurl))
except:
log.warning("ignoring duplicate repository %s with URL %s" % (repo.name, repo.mirrorlist or repo.baseurl))
self.repos.setCacheDir(self.conf.cachedir)
if os.path.exists("%s/boot/upgrade/install.img" % self.anaconda.rootPath):
log.info("REMOVING stage2 image from %s /boot/upgrade" % self.anaconda.rootPath )
try:
os.unlink("%s/boot/upgrade/install.img" % self.anaconda.rootPath)
except:
log.warning("failed to clean /boot/upgrade")
def downloadHeader(self, po):
while True:
# retrying version of download header
try:
YumSorter.downloadHeader(self, po)
break
except yum.Errors.NoMoreMirrorsRepoError:
self._handleFailure(po)
except IOError:
self._handleFailure(po)
except yum.Errors.RepoError, e:
continue
def _handleFailure(self, package):
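        # A package could not be retrieved: offer to retry (ejecting the
        # media first when applicable) or to reboot the machine.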
if not self.isodir and self.currentMedia:
buttons = [_("Re_boot"), _("_Eject")]
else:
buttons = [_("Re_boot"), _("_Retry")]
pkgFile = to_unicode(os.path.basename(package.remote_path))
rc = self.anaconda.intf.messageWindow(_("Error"),
_("The file %s cannot be opened. This is due to a missing "
"file, a corrupt package or corrupt media. Please "
"verify your installation source.\n\n"
"If you exit, your system will be left in an inconsistent "
"state that will likely require reinstallation.\n\n") %
(pkgFile,),
type="custom", custom_icon="error",
custom_buttons=buttons)
if rc == 0:
sys.exit(0)
else:
if os.path.exists(package.localPkg()):
os.unlink(package.localPkg())
if not self.isodir and self.currentMedia:
self._switchCD(self.currentMedia)
else:
return
def mirrorFailureCB (self, obj, *args, **kwargs):
# This gets called when a mirror fails, but it cannot know whether
# or not there are other mirrors left to try, since it cannot know
# which mirror we were on when we started this particular download.
# Whenever we have run out of mirrors the grabber's get/open/retrieve
# method will raise a URLGrabError exception with errno 256.
grab = self.repos.getRepo(kwargs["repo"]).grab
log.warning("Failed to get %s from mirror %d/%d, "
"or downloaded file is corrupt" % (obj.url, grab._next + 1,
len(grab.mirrors)))
if self.currentMedia:
dev = self.anaconda.storage.devicetree.getDeviceByName(self.anaconda.mediaDevice)
dev.format.mountpoint = self.tree
unmountCD(dev, self.anaconda.intf.messageWindow)
self.currentMedia = None
def urlgrabberFailureCB (self, obj, *args, **kwargs):
if hasattr(obj, "exception"):
log.warning("Try %s/%s for %s failed: %s" % (obj.tries, obj.retry, obj.url, obj.exception))
else:
log.warning("Try %s/%s for %s failed" % (obj.tries, obj.retry, obj.url))
if obj.tries == obj.retry:
return
delay = 0.25*(2**(obj.tries-1))
if delay > 1:
w = self.anaconda.intf.waitWindow(_("Retrying"), _("Retrying download."))
time.sleep(delay)
w.pop()
else:
time.sleep(delay)
def getDownloadPkgs(self):
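        # Collect the packages to be installed together with their total
        # installed size (in KB) and file count, for progress reporting.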
downloadpkgs = []
totalSize = 0
totalFiles = 0
for txmbr in self.tsInfo.getMembersWithState(output_states=TS_INSTALL_STATES):
if txmbr.po:
totalSize += int(txmbr.po.returnSimple("installedsize")) / 1024
for filetype in txmbr.po.returnFileTypes():
totalFiles += len(txmbr.po.returnFileEntries(ftype=filetype))
downloadpkgs.append(txmbr.po)
return (downloadpkgs, totalSize, totalFiles)
def setColor(self):
if rpmUtils.arch.isMultiLibArch():
self.ts.ts.setColor(3)
def run(self, instLog, cb, intf, id):
def mediasort(a, b):
# sort so that first CD comes first, etc. -99 is a magic number
# to tell us that the cd should be last
if a == -99:
return 1
elif b == -99:
return -1
if a < b:
return -1
elif a > b:
return 1
return 0
self.initActionTs()
if self.anaconda.upgrade:
self.ts.ts.setProbFilter(~rpm.RPMPROB_FILTER_DISKSPACE)
self.setColor()
# If we don't have any required media assume single disc
if self.tsInfo.reqmedia == {}:
self.tsInfo.reqmedia[0] = None
mkeys = self.tsInfo.reqmedia.keys()
mkeys.sort(mediasort)
stage2img = "%s/images/install.img" % self.tree
if os.path.exists(stage2img):
if self.anaconda.backend.mountInstallImage(self.anaconda, stage2img):
self.anaconda.storage.umountFilesystems()
return DISPATCH_BACK
for i in mkeys:
self.tsInfo.curmedia = i
if i > 0:
pkgtup = self.tsInfo.reqmedia[i][0]
try:
self.dsCallback = DownloadHeaderProgress(intf, self)
self.populateTs(keepold=0)
self.dsCallback.pop()
self.dsCallback = None
except RepoError, e:
msg = _("There was an error running your transaction for "
"the following reason: %s\n") % str(e)
if self.anaconda.upgrade:
rc = intf.messageWindow(_("Error"), msg, type="custom",
custom_icon="error",
custom_buttons=[_("_Exit installer")])
sys.exit(1)
else:
rc = intf.messageWindow(_("Error"), msg,
type="custom", custom_icon="error",
custom_buttons=[_("_Back"), _("_Exit installer")])
if rc == 1:
sys.exit(1)
else:
self.tsInfo.curmedia = None
return DISPATCH_BACK
self.ts.check()
self.ts.order()
if self._run(instLog, cb, intf) == DISPATCH_BACK:
self.tsInfo.curmedia = None
return DISPATCH_BACK
self.ts.close()
def _run(self, instLog, cb, intf):
# set log fd. FIXME: this is ugly. see changelog entry from 2005-09-13
self.ts.ts.scriptFd = instLog.fileno()
rpm.setLogFile(instLog)
uniqueProbs = {}
spaceneeded = {}
spaceprob = ""
fileConflicts = []
fileprob = ""
try:
self.runTransaction(cb=cb)
except YumBaseError, probs:
# FIXME: we need to actually look at these problems...
probTypes = { rpm.RPMPROB_NEW_FILE_CONFLICT : _('file conflicts'),
rpm.RPMPROB_FILE_CONFLICT : _('file conflicts'),
rpm.RPMPROB_OLDPACKAGE: _('older package(s)'),
rpm.RPMPROB_DISKSPACE: _('insufficient disk space'),
rpm.RPMPROB_DISKNODES: _('insufficient disk inodes'),
rpm.RPMPROB_CONFLICT: _('package conflicts'),
rpm.RPMPROB_PKG_INSTALLED: _('package already installed'),
rpm.RPMPROB_REQUIRES: _('required package'),
rpm.RPMPROB_BADARCH: _('package for incorrect arch'),
rpm.RPMPROB_BADOS: _('package for incorrect os'),
}
for (descr, (ty, mount, need)) in probs.value: # FIXME: probs.value???
log.error("%s: %s" %(probTypes[ty], descr))
if not uniqueProbs.has_key(ty) and probTypes.has_key(ty):
uniqueProbs[ty] = probTypes[ty]
if ty == rpm.RPMPROB_DISKSPACE:
spaceneeded[mount] = need
elif ty in [rpm.RPMPROB_NEW_FILE_CONFLICT, rpm.RPMPROB_FILE_CONFLICT]:
fileConflicts.append(descr)
if spaceneeded:
spaceprob = _("You need more space on the following "
"file systems:\n")
for (mount, need) in spaceneeded.items():
log.info("(%s, %s)" %(mount, need))
                    if mount.startswith("/mnt/sysimage/"):
                        mount = mount.replace("/mnt/sysimage", "")
                    elif mount.startswith("/mnt/sysimage"):
                        mount = "/" + mount.replace("/mnt/sysimage", "")
spaceprob += "%d M on %s\n" % (need / (1024*1024), mount)
elif fileConflicts:
fileprob = _("There were file conflicts when checking the "
"packages to be installed:\n%s\n") % ("\n".join(fileConflicts),)
msg = _("There was an error running your transaction for "
"the following reason(s): %s.\n") % ', '.join(uniqueProbs.values())
spaceprob = to_unicode(spaceprob)
fileprob = to_unicode(fileprob)
if len(self.anaconda.backend.getRequiredMedia()) > 1 or self.anaconda.upgrade:
intf.detailedMessageWindow(_("Error Running Transaction"),
msg, spaceprob + "\n" + fileprob, type="custom",
custom_icon="error", custom_buttons=[_("_Exit installer")])
sys.exit(1)
else:
rc = intf.detailedMessageWindow(_("Error Running Transaction"),
msg, spaceprob + "\n" + fileprob, type="custom",
custom_icon="error",
custom_buttons=[_("_Back"), _("_Exit installer")])
if rc == 1:
sys.exit(1)
else:
self._undoDepInstalls()
return DISPATCH_BACK
def doMacros(self):
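        # Push our collected macro overrides into rpm's global macro table.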
for (key, val) in self.macros.items():
rpm.addMacro(key, val)
def simpleDBInstalled(self, name, arch=None):
# FIXME: doing this directly instead of using self.rpmdb.installed()
# speeds things up by 400%
mi = self.ts.ts.dbMatch('name', name)
if mi.count() == 0:
return False
if arch is None:
return True
if arch in map(lambda h: h['arch'], mi):
return True
return False
def isPackageInstalled(self, name = None, epoch = None, version = None,
release = None, arch = None, po = None):
# FIXME: this sucks. we should probably suck it into yum proper
# but it'll need a bit of cleanup first.
if po is not None:
(name, epoch, version, release, arch) = po.returnNevraTuple()
installed = False
if name and not (epoch or version or release or arch):
installed = self.simpleDBInstalled(name)
elif self.rpmdb.installed(name = name, epoch = epoch, ver = version,
rel = release, arch = arch):
installed = True
lst = self.tsInfo.matchNaevr(name = name, epoch = epoch,
ver = version, rel = release,
arch = arch)
for txmbr in lst:
if txmbr.output_state in TS_INSTALL_STATES:
return True
if installed and len(lst) > 0:
# if we get here, then it was installed, but it's in the tsInfo
# for an erase or obsoleted --> not going to be installed at end
return False
return installed
def isGroupInstalled(self, grp):
if grp.selected:
return True
elif grp.installed and not grp.toremove:
return True
return False
def _pkgExists(self, pkg):
"""Whether or not a given package exists in our universe."""
try:
pkgs = self.pkgSack.returnNewestByName(pkg)
return True
except yum.Errors.PackageSackError:
pass
try:
pkgs = self.rpmdb.returnNewestByName(pkg)
return True
except (IndexError, yum.Errors.PackageSackError):
pass
return False
def _groupHasPackages(self, grp):
# this checks to see if the given group has any packages available
# (ie, already installed or in the sack of available packages)
# so that we don't show empty groups. also, if there are mandatory
# packages and we have none of them, don't show
for pkg in grp.mandatory_packages.keys():
if self._pkgExists(pkg):
return True
if len(grp.mandatory_packages) > 0:
return False
for pkg in grp.default_packages.keys() + grp.optional_packages.keys():
if self._pkgExists(pkg):
return True
return False
class YumBackend(AnacondaBackend):
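    """Anaconda package installation backend implemented on top of yum."""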
def __init__ (self, anaconda):
AnacondaBackend.__init__(self, anaconda)
self.supportsPackageSelection = True
buf = """
[main]
installroot=%s
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
logfile=/tmp/yum.log
metadata_expire=0
obsoletes=True
pluginpath=/usr/lib/yum-plugins,/tmp/updates/yum-plugins
pluginconfpath=/etc/yum/pluginconf.d,/tmp/updates/pluginconf.d
plugins=1
reposdir=/etc/anaconda.repos.d,/tmp/updates/anaconda.repos.d,/tmp/product/anaconda.repos.d
""" % (anaconda.rootPath)
if anaconda.proxy:
buf += "proxy=%s\n" % anaconda.proxy
if anaconda.proxyUsername:
buf += "proxy_username=%s\n" % anaconda.proxyUsername
if anaconda.proxyPassword:
buf += "proxy_password=%s\n" % anaconda.proxyPassword
fd = open("/tmp/anaconda-yum.conf", "w")
fd.write(buf)
fd.close()
def complete(self, anaconda):
if not anaconda.mediaDevice and os.path.ismount(self.ayum.tree):
isys.umount(self.ayum.tree)
anaconda.backend.removeInstallImage()
# clean up rpmdb locks so that kickstart %post scripts aren't
# unhappy (#496961)
iutil.resetRpmDb(anaconda.rootPath)
def doBackendSetup(self, anaconda):
if anaconda.dir == DISPATCH_BACK:
return DISPATCH_BACK
if anaconda.upgrade:
# FIXME: make sure that the rpmdb doesn't have stale locks :/
iutil.resetRpmDb(anaconda.rootPath)
anaconda.backend.freetmp(anaconda)
self.ayum = AnacondaYum(anaconda)
self.ayum.setup()
self.ayum.doMacros()
# If any enabled repositories require networking, go ahead and bring
# it up now. No need to have people wait for the timeout when we
# know this in advance.
for repo in self.ayum.repos.listEnabled():
if repo.needsNetwork() and not network.hasActiveNetDev():
if not anaconda.intf.enableNetwork():
anaconda.intf.messageWindow(_("No Network Available"),
_("Some of your software repositories require "
"networking, but there was an error enabling the "
"network on your system."),
type="custom", custom_icon="error",
custom_buttons=[_("_Exit installer")])
sys.exit(1)
urlgrabber.grabber.reset_curl_obj()
break
self.doRepoSetup(anaconda)
self.doSackSetup(anaconda)
self.doGroupSetup(anaconda)
self.ayum.doMacros()
def doGroupSetup(self, anaconda):
while True:
try:
self.ayum.doGroupSetup()
except (GroupsError, NoSuchGroup, RepoError), e:
buttons = [_("_Exit installer"), _("_Retry")]
else:
break # success
rc = anaconda.intf.messageWindow(_("Error"),
_("Unable to read group information "
"from repositories. This is "
"a problem with the generation "
"of your install tree."),
type="custom", custom_icon="error",
custom_buttons = buttons)
if rc == 0:
sys.exit(0)
else:
self.ayum._setGroups(None)
continue
def doRepoSetup(self, anaconda, thisrepo = None, fatalerrors = True):
self.__withFuncDo(anaconda, lambda r: self.ayum.doRepoSetup(thisrepo=r.id),
thisrepo=thisrepo, fatalerrors=fatalerrors)
def doSackSetup(self, anaconda, thisrepo = None, fatalerrors = True):
self.__withFuncDo(anaconda, lambda r: self.ayum.doSackSetup(thisrepo=r.id),
thisrepo=thisrepo, fatalerrors=fatalerrors)
def __withFuncDo(self, anaconda, fn, thisrepo=None, fatalerrors=True):
# Don't do this if we're being called as a dispatcher step (instead
# of being called when a repo is added via the UI) and we're going
# back.
if thisrepo is None and anaconda.dir == DISPATCH_BACK:
return
# We want to call the function one repo at a time so we have some
# concept of which repo didn't set up correctly.
if thisrepo is not None:
repos = [self.ayum.repos.getRepo(thisrepo)]
else:
repos = self.ayum.repos.listEnabled()
for repo in repos:
if repo.name is None:
txt = _("Retrieving installation information.")
else:
txt = _("Retrieving installation information for %s.")%(repo.name)
waitwin = anaconda.intf.waitWindow(_("Installation Progress"), txt)
while True:
try:
fn(repo)
waitwin.pop()
except RepoError, e:
waitwin.pop()
buttons = [_("_Exit installer"), _("Edit"), _("_Retry")]
else:
break # success
if anaconda.ksdata:
buttons.append(_("_Continue"))
if not fatalerrors:
raise RepoError, e
rc = anaconda.intf.messageWindow(_("Error"),
_("Unable to read package metadata. This may be "
"due to a missing repodata directory. Please "
"ensure that your install tree has been "
"correctly generated.\n\n%s" % e),
type="custom", custom_icon="error",
custom_buttons=buttons)
if rc == 0:
# abort
sys.exit(0)
elif rc == 1:
# edit
anaconda.intf.editRepoWindow(repo)
break
elif rc == 2:
# retry, but only if button is present
continue
else:
# continue, but only if button is present
self.ayum.repos.delete(repo.id)
break
# if we're in kickstart the repo may have been deleted just above
try:
self.ayum.repos.getRepo(repo.id)
except RepoError:
log.debug("repo %s has been removed" % (repo.id,))
continue
repo.setFailureObj(self.ayum.urlgrabberFailureCB)
repo.setMirrorFailureObj((self.ayum.mirrorFailureCB, (),
{"repo": repo.id}))
self.ayum.repos.callback = None
def getDefaultGroups(self, anaconda):
langs = anaconda.instLanguage.getCurrentLangSearchList()
rc = map(lambda x: x.groupid,
filter(lambda x: x.default, self.ayum.comps.groups))
for g in self.ayum.comps.groups:
if g.langonly in langs:
rc.append(g.groupid)
return rc
def resetPackageSelections(self):
"""Reset the package selection to an empty state."""
for txmbr in self.ayum.tsInfo:
self.ayum.tsInfo.remove(txmbr.pkgtup)
self.ayum.tsInfo.conditionals.clear()
for grp in self.ayum.comps.groups:
grp.selected = False
def selectModulePackages(self, anaconda, kernelPkgName):
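        # Select packages providing the driver disc modules that match the
        # flavor (e.g. PAE) of the kernel package that was selected.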
(base, sep, ext) = kernelPkgName.partition("-")
moduleProvides = []
for (path, name) in anaconda.extraModules:
if ext != "":
moduleProvides.append("dud-%s-%s" % (name, ext))
else:
moduleProvides.append("dud-%s" % name)
#We need to install the packages which contain modules from DriverDiscs
for modPath in isys.modulesWithPaths():
if modPath.startswith(DD_EXTRACTED):
moduleProvides.append(modPath[len(DD_EXTRACTED):])
else:
continue
for module in moduleProvides:
pkgs = self.ayum.returnPackagesByDep(module)
if not pkgs:
log.warning("Didn't find any package providing %s" % module)
for pkg in pkgs:
log.info("selecting package %s for %s" % (pkg.name, module))
self.ayum.install(po=pkg)
def selectBestKernel(self, anaconda):
"""Find the best kernel package which is available and select it."""
def getBestKernelByArch(pkgname, ayum):
"""Convenience func to find the best arch of a kernel by name"""
try:
pkgs = ayum.pkgSack.returnNewestByName(pkgname)
except yum.Errors.PackageSackError:
return None
pkgs = self.ayum.bestPackagesFromList(pkgs)
if len(pkgs) == 0:
return None
return pkgs[0]
def selectKernel(pkgname):
try:
pkg = getBestKernelByArch(pkgname, self.ayum)
except PackageSackError:
log.debug("no %s package" % pkgname)
return False
if not pkg:
return False
log.info("selected %s package for kernel" % pkg.name)
self.ayum.install(po=pkg)
self.selectModulePackages(anaconda, pkg.name)
if len(self.ayum.tsInfo.matchNaevr(name="gcc")) > 0:
log.debug("selecting %s-devel" % pkg.name)
self.selectPackage("%s-devel.%s" % (pkg.name, pkg.arch))
return True
foundkernel = False
if not foundkernel and isys.isPaeAvailable():
if selectKernel("kernel-PAE"):
foundkernel = True
if not foundkernel:
selectKernel("kernel")
def selectFSPackages(self, storage):
for device in storage.fsset.devices:
# this takes care of device and filesystem packages
map(self.selectPackage, device.packages)
    # anaconda requires several programs on the installed system to complete
    # installation, but there is no guarantee that they will be present
    # (they could have been removed in kickstart), so force them in.
def selectAnacondaNeeds(self):
for pkg in ['authconfig', 'chkconfig', 'system-config-firewall-base']:
self.selectPackage(pkg)
def doPostSelection(self, anaconda):
# Only solve dependencies on the way through the installer, not the way back.
if anaconda.dir == DISPATCH_BACK:
return
dscb = YumDepSolveProgress(anaconda.intf, self.ayum)
self.ayum.dsCallback = dscb
# do some sanity checks for kernel and bootloader
if not anaconda.upgrade:
# New installs only - upgrades will already have all this stuff.
self.selectBestKernel(anaconda)
map(self.selectPackage, anaconda.platform.packages)
self.selectFSPackages(anaconda.storage)
self.selectAnacondaNeeds()
else:
self.ayum.update()
while True:
try:
(code, msgs) = self.ayum.buildTransaction()
# If %packages --ignoremissing was given, don't bother
# prompting for missing dependencies.
if anaconda.ksdata and anaconda.ksdata.packages.handleMissing == KS_MISSING_IGNORE:
break
if code == 1 and not anaconda.upgrade:
# resolveDeps returns 0 if empty transaction, 1 if error,
# 2 if success
depprob = "\n".join(msgs)
rc = anaconda.intf.detailedMessageWindow(_("Warning"),
_("Some of the packages you have selected for "
"install are missing dependencies. You can "
"exit the installation, go back and change "
"your package selections, or continue "
"installing these packages without their "
"dependencies. If you continue, these packages "
"may not work correctly due to missing components."),
depprob + "\n", type="custom", custom_icon="error",
custom_buttons=[_("_Exit installer"), _("_Back"),
_("_Continue")])
dscb.pop()
if rc == 0:
sys.exit(1)
elif rc == 1:
self.ayum._undoDepInstalls()
return DISPATCH_BACK
break
except RepoError, e:
# FIXME: would be nice to be able to recover here
rc = anaconda.intf.messageWindow(_("Error"),
_("Unable to read package metadata. This may be "
"due to a missing repodata directory. Please "
"ensure that your install tree has been "
"correctly generated.\n\n%s" % e),
type="custom", custom_icon="error",
custom_buttons=[_("_Exit installer"), _("_Retry")])
dscb.pop()
if rc == 0:
sys.exit(0)
else:
continue
else:
break
(self.dlpkgs, self.totalSize, self.totalFiles) = self.ayum.getDownloadPkgs()
if not anaconda.upgrade:
largePart = anaconda.storage.mountpoints.get("/usr", anaconda.storage.rootDevice)
if largePart and largePart.size < self.totalSize / 1024:
rc = anaconda.intf.messageWindow(_("Error"),
_("Your selected packages require %d MB "
"of free space for installation, but "
"you do not have enough available. "
"You can change your selections or "
"exit the installer." % (self.totalSize / 1024)),
type="custom", custom_icon="error",
custom_buttons=[_("_Back"), _("_Exit installer")])
dscb.pop()
if rc == 1:
sys.exit(1)
else:
self.ayum._undoDepInstalls()
return DISPATCH_BACK
dscb.pop()
if anaconda.mediaDevice and not anaconda.ksdata:
rc = presentRequiredMediaMessage(anaconda)
if rc == 0:
rc2 = anaconda.intf.messageWindow(_("Reboot?"),
_("The system will be rebooted now."),
type="custom", custom_icon="warning",
custom_buttons=[_("_Back"), _("_Reboot")])
if rc2 == 1:
sys.exit(0)
else:
return DISPATCH_BACK
elif rc == 1: # they asked to go back
return DISPATCH_BACK
self.ayum.dsCallback = None
def doPreInstall(self, anaconda):
if anaconda.dir == DISPATCH_BACK:
for d in ("/selinux", "/dev", "/proc/bus/usb"):
try:
isys.umount(anaconda.rootPath + d, removeDir = False)
except Exception, e:
log.error("unable to unmount %s: %s" %(d, e))
return
if anaconda.upgrade:
# An old mtab can cause confusion (esp if loop devices are
# in it). Be extra special careful and delete any mtab first,
# in case the user has done something funny like make it into
# a symlink.
if os.access(anaconda.rootPath + "/etc/mtab", os.F_OK):
os.remove(anaconda.rootPath + "/etc/mtab")
f = open(anaconda.rootPath + "/etc/mtab", "w+")
f.close()
# we really started writing modprobe.conf out before things were
# all completely ready. so now we need to nuke old modprobe.conf's
# if you're upgrading from a 2.4 dist so that we can get the
# transition right
if (os.path.exists(anaconda.rootPath + "/etc/modules.conf") and
os.path.exists(anaconda.rootPath + "/etc/modprobe.conf") and
not os.path.exists(anaconda.rootPath + "/etc/modprobe.conf.anacbak")):
log.info("renaming old modprobe.conf -> modprobe.conf.anacbak")
os.rename(anaconda.rootPath + "/etc/modprobe.conf",
anaconda.rootPath + "/etc/modprobe.conf.anacbak")
dirList = ['/var', '/var/lib', '/var/lib/rpm', '/tmp', '/dev', '/etc',
'/etc/sysconfig', '/etc/sysconfig/network-scripts',
'/etc/X11', '/root', '/var/tmp', '/etc/rpm', '/var/cache',
'/var/cache/yum', '/etc/modprobe.d']
# If there are any protected partitions we want to mount, create their
# mount points now.
for protected in anaconda.storage.protectedDevices:
if getattr(protected.format, "mountpoint", None):
dirList.append(protected.format.mountpoint)
for i in dirList:
try:
os.mkdir(anaconda.rootPath + i)
except os.error, (errno, msg):
pass
# log.error("Error making directory %s: %s" % (i, msg))
self.initLog(anaconda.rootPath)
try:
# FIXME: making the /var/lib/rpm symlink here is a hack to
# workaround db->close() errors from rpm
iutil.mkdirChain("/var/lib")
for path in ("/var/tmp", "/var/lib/rpm"):
if os.path.exists(path) and not os.path.islink(path):
shutil.rmtree(path)
if not os.path.islink(path):
os.symlink("%s/%s" %(anaconda.rootPath, path), "%s" %(path,))
else:
log.warning("%s already exists as a symlink to %s" %(path, os.readlink(path),))
except Exception, e:
# how this could happen isn't entirely clear; log it in case
# it does and causes problems later
log.error("error creating symlink, continuing anyway: %s" %(e,))
# SELinux hackery (#121369)
if flags.selinux:
try:
os.mkdir(anaconda.rootPath + "/selinux")
except Exception, e:
pass
try:
isys.mount("/selinux", anaconda.rootPath + "/selinux", "selinuxfs")
except Exception, e:
log.error("error mounting selinuxfs: %s" %(e,))
# For usbfs
try:
isys.mount("/proc/bus/usb", anaconda.rootPath + "/proc/bus/usb", "usbfs")
except Exception, e:
log.error("error mounting usbfs: %s" %(e,))
# write out the fstab
if not anaconda.upgrade:
anaconda.storage.fsset.write(anaconda.rootPath)
if os.access("/etc/modprobe.d/anaconda.conf", os.R_OK):
shutil.copyfile("/etc/modprobe.d/anaconda.conf",
anaconda.rootPath + "/etc/modprobe.d/anaconda.conf")
anaconda.network.write(instPath=anaconda.rootPath, anaconda=anaconda)
anaconda.storage.write(anaconda.rootPath)
if not anaconda.isHeadless:
anaconda.keyboard.write(anaconda.rootPath)
# make a /etc/mtab so mkinitrd can handle certain hw (usb) correctly
f = open(anaconda.rootPath + "/etc/mtab", "w+")
f.write(anaconda.storage.mtab)
f.close()
def checkSupportedUpgrade(self, anaconda):
if anaconda.dir == DISPATCH_BACK:
return
self._checkUpgradeVersion(anaconda)
self._checkUpgradeArch(anaconda)
def _checkUpgradeVersion(self, anaconda):
# Figure out current version for upgrade nag and for determining weird
# upgrade cases
supportedUpgradeVersion = -1
for pkgtup in self.ayum.rpmdb.whatProvides('redhat-release', None, None):
n, a, e, v, r = pkgtup
if supportedUpgradeVersion <= 0:
val = rpmUtils.miscutils.compareEVR((None, '3', '1'),
(e, v,r))
if val > 0:
supportedUpgradeVersion = 0
else:
supportedUpgradeVersion = 1
break
if "Red Hat Enterprise Linux" not in productName:
supportedUpgradeVersion = 1
if supportedUpgradeVersion == 0:
rc = anaconda.intf.messageWindow(_("Warning"),
_("You appear to be upgrading from a system "
"which is too old to upgrade to this "
"version of %s. Are you sure you wish to "
"continue the upgrade "
"process?") %(productName,),
type = "yesno")
if rc == 0:
iutil.resetRpmDb(anaconda.rootPath)
sys.exit(0)
def _checkUpgradeArch(self, anaconda):
def compareArch(a, b):
if re.match("i.86", a) and re.match("i.86", b):
return True
else:
return a == b
# get the arch of the initscripts package
try:
pkgs = self.ayum.pkgSack.returnNewestByName('initscripts')
except yum.Errors.PackageSackError:
log.info("no packages named initscripts")
return None
pkgs = self.ayum.bestPackagesFromList(pkgs)
if len(pkgs) == 0:
log.info("no best package")
return
myarch = pkgs[0].arch
log.info("initscripts is arch: %s" %(myarch,))
for po in self.ayum.rpmdb.getProvides('initscripts'):
log.info("po.arch is arch: %s" %(po.arch,))
if not compareArch(po.arch, myarch):
rc = anaconda.intf.messageWindow(_("Warning"),
_("The arch of the release of %(productName)s you "
"are upgrading to appears to be %(myarch)s which "
"does not match your previously installed arch of "
"%(arch)s. This is likely to not succeed. Are "
"you sure you wish to continue the upgrade "
"process?")
% {'productName': productName,
'myarch': myarch,
'arch': po.arch},
type="yesno")
if rc == 0:
iutil.resetRpmDb(anaconda.rootPath)
sys.exit(0)
else:
log.warning("upgrade between possibly incompatible "
"arches %s -> %s" %(po.arch, myarch))
break
def doInstall(self, anaconda):
log.info("Preparing to install packages")
if not anaconda.upgrade:
rpm.addMacro("__dbi_htconfig",
"hash nofsync %{__dbi_other} %{__dbi_perms}")
if anaconda.ksdata and anaconda.ksdata.packages.excludeDocs:
rpm.addMacro("_excludedocs", "1")
cb = AnacondaCallback(self.ayum, anaconda,
self.instLog, self.modeText)
cb.setSizes(len(self.dlpkgs), self.totalSize, self.totalFiles)
rc = self.ayum.run(self.instLog, cb, anaconda.intf, anaconda.id)
if cb.initWindow is not None:
cb.initWindow.pop()
self.instLog.write("*** FINISHED INSTALLING PACKAGES ***")
self.instLog.close ()
anaconda.intf.setInstallProgressClass(None)
if rc == DISPATCH_BACK:
return DISPATCH_BACK
def doPostInstall(self, anaconda):
if anaconda.upgrade:
w = anaconda.intf.waitWindow(_("Post Upgrade"),
_("Performing post-upgrade configuration"))
else:
w = anaconda.intf.waitWindow(_("Post Installation"),
_("Performing post-installation configuration"))
packages.rpmSetupGraphicalSystem(anaconda)
for repo in self.ayum.repos.listEnabled():
repo.dirCleanup()
# expire yum caches on upgrade
if anaconda.upgrade and os.path.exists("%s/var/cache/yum" %(anaconda.rootPath,)):
log.info("Expiring yum caches")
try:
iutil.execWithRedirect("yum", ["clean", "all"],
stdout="/dev/tty5", stderr="/dev/tty5",
root = anaconda.rootPath)
except:
pass
# nuke preupgrade
if flags.cmdline.has_key("preupgrade") and os.path.exists("%s/var/cache/yum/anaconda-upgrade" %(anaconda.rootPath,)):
try:
shutil.rmtree("%s/var/cache/yum/anaconda-upgrade" %(anaconda.rootPath,))
except:
pass
# XXX: write proper lvm config
AnacondaBackend.doPostInstall(self, anaconda)
w.pop()
def kernelVersionList(self, rootPath="/"):
# FIXME: using rpm here is a little lame, but otherwise, we'd
# be pulling in filelists
return packages.rpmKernelVersionList(rootPath)
def __getGroupId(self, group):
"""Get the groupid for the given name (english or translated)."""
for g in self.ayum.comps.groups:
if group == g.name:
return g.groupid
for trans in g.translated_name.values():
if group == trans:
return g.groupid
def isGroupSelected(self, group):
try:
grp = self.ayum.comps.return_group(group)
if grp.selected: return True
except yum.Errors.GroupsError, e:
pass
return False
def selectGroup(self, group, *args):
if not self.ayum.comps.has_group(group):
log.debug("no such group %s" % group)
raise NoSuchGroup, group
types = ["mandatory"]
if args:
if args[0][0]:
types.append("default")
if args[0][1]:
types.append("optional")
else:
types.append("default")
try:
mbrs = self.ayum.selectGroup(group, group_package_types=types)
if len(mbrs) == 0 and self.isGroupSelected(group):
return
except yum.Errors.GroupsError, e:
# try to find out if it's the name or translated name
gid = self.__getGroupId(group)
if gid is not None:
mbrs = self.ayum.selectGroup(gid, group_package_types=types)
if len(mbrs) == 0 and self.isGroupSelected(gid):
return
else:
log.debug("no such group %s" %(group,))
raise NoSuchGroup, group
def deselectGroup(self, group, *args):
try:
self.ayum.deselectGroup(group)
except yum.Errors.GroupsError, e:
# try to find out if it's the name or translated name
gid = self.__getGroupId(group)
if gid is not None:
self.ayum.deselectGroup(gid)
else:
log.debug("no such group %s" %(group,))
def selectPackage(self, pkg, *args):
if self.ayum.tsInfo.matchNaevr(name=pkg):
return 0
try:
mbrs = self.ayum.install(pattern=pkg)
return len(mbrs)
except yum.Errors.InstallError:
log.debug("no package matching %s" %(pkg,))
return 0
def deselectPackage(self, pkg, *args):
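        # Remove a package (optionally given as name.arch) from the
        # transaction, including any conditional references to it.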
sp = pkg.rsplit(".", 2)
txmbrs = []
if len(sp) == 2:
txmbrs = self.ayum.tsInfo.matchNaevr(name=sp[0], arch=sp[1])
if len(txmbrs) == 0:
exact, match, unmatch = yum.packages.parsePackages(self.ayum.pkgSack.returnPackages(), [pkg], casematch=1)
for p in exact + match:
txmbrs.append(p)
if len(txmbrs) > 0:
for x in txmbrs:
self.ayum.tsInfo.remove(x.pkgtup)
# we also need to remove from the conditionals
# dict so that things don't get pulled back in as a result
# of them. yes, this is ugly. conditionals should die.
for req, pkgs in self.ayum.tsInfo.conditionals.iteritems():
if x in pkgs:
pkgs.remove(x)
self.ayum.tsInfo.conditionals[req] = pkgs
return len(txmbrs)
else:
log.debug("no such package %s to remove" %(pkg,))
return 0
def groupListExists(self, grps):
"""Returns bool of whether all of the given groups exist."""
for gid in grps:
g = self.ayum.comps.return_group(gid)
if not g:
return False
return True
def groupListDefault(self, grps):
"""Returns bool of whether all of the given groups are default"""
rc = False
for gid in grps:
g = self.ayum.comps.return_group(gid)
if g and not g.default:
return False
elif g:
rc = True
return rc
def writeKS(self, f):
for repo in self.ayum.repos.listEnabled():
if repo.name == "Installation Repo":
continue
line = "repo --name=\"%s\" " % (repo.name or repo.repoid)
if repo.baseurl:
line += " --baseurl=%s\n" % repo.baseurl[0]
else:
line += " --mirrorlist=%s\n" % repo.mirrorlist
f.write(line)
def writePackagesKS(self, f, anaconda):
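        # Write the %packages section of the kickstart file, reconstructing
        # group selections plus explicit additions and removals.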
if anaconda.ksdata:
f.write(anaconda.ksdata.packages.__str__())
return
groups = []
installed = []
removed = []
# Faster to grab all the package names up front rather than call
# searchNevra in the loop below.
allPkgNames = map(lambda pkg: pkg.name, self.ayum.pkgSack.returnPackages())
allPkgNames.sort()
# On CD/DVD installs, we have one transaction per CD and will end up
# checking allPkgNames against a very short list of packages. So we
# have to reset to media #0, which is an all packages transaction.
old = self.ayum.tsInfo.curmedia
self.ayum.tsInfo.curmedia = 0
self.ayum.tsInfo.makelists()
txmbrNames = map (lambda x: x.name, self.ayum.tsInfo.getMembers())
self.ayum.tsInfo.curmedia = old
if len(self.ayum.tsInfo.instgroups) == 0 and len(txmbrNames) == 0:
return
f.write("\n%packages\n")
for grp in filter(lambda x: x.selected, self.ayum.comps.groups):
groups.append(grp.groupid)
defaults = grp.default_packages.keys() + grp.mandatory_packages.keys()
optionals = grp.optional_packages.keys()
for pkg in filter(lambda x: x in defaults and (not x in txmbrNames and x in allPkgNames), grp.packages):
removed.append(pkg)
for pkg in filter(lambda x: x in txmbrNames, optionals):
installed.append(pkg)
for grp in groups:
f.write("@%s\n" % grp)
for pkg in installed:
f.write("%s\n" % pkg)
for pkg in removed:
f.write("-%s\n" % pkg)
f.write("%end")
def writeConfiguration(self):
return
def getRequiredMedia(self):
return self.ayum.tsInfo.reqmedia.keys()
class DownloadHeaderProgress:
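    """Progress window shown while the install transaction is populated."""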
def __init__(self, intf, ayum=None):
window = intf.progressWindow(_("Installation Starting"),
_("Starting installation process"),
1.0, 0.01)
self.window = window
self.ayum = ayum
self.current = self.loopstart = 0
self.incr = 1
if self.ayum is not None and self.ayum.tsInfo is not None:
self.numpkgs = len(self.ayum.tsInfo.getMembers())
if self.numpkgs != 0:
self.incr = (1.0 / self.numpkgs) * (1.0 - self.loopstart)
else:
self.numpkgs = 0
self.refresh()
self.restartLoop = self.downloadHeader = self.transactionPopulation = self.refresh
self.procReq = self.procConflict = self.unresolved = self.noop
def noop(self, *args, **kwargs):
pass
def pkgAdded(self, *args):
if self.numpkgs:
self.set(self.current + self.incr)
def pop(self):
self.window.pop()
def refresh(self, *args):
self.window.refresh()
def set(self, value):
self.current = value
self.window.set(self.current)
class YumDepSolveProgress:
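    """Progress window shown while yum resolves package dependencies."""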
def __init__(self, intf, ayum = None):
window = intf.progressWindow(_("Dependency Check"),
_("Checking dependencies in packages selected for installation"),
1.0, 0.01)
self.window = window
self.numpkgs = None
self.loopstart = None
self.incr = None
self.ayum = ayum
self.current = 0
self.restartLoop = self.downloadHeader = self.transactionPopulation = self.refresh
self.procReq = self.procConflict = self.unresolved = self.noop
def tscheck(self, num = None):
self.refresh()
if num is None and self.ayum is not None and self.ayum.tsInfo is not None:
num = len(self.ayum.tsInfo.getMembers())
if num:
self.numpkgs = num
self.loopstart = self.current
self.incr = (1.0 / num) * ((1.0 - self.loopstart) / 2)
def pkgAdded(self, *args):
if self.numpkgs:
self.set(self.current + self.incr)
def noop(self, *args, **kwargs):
pass
def refresh(self, *args):
self.window.refresh()
def set(self, value):
self.current = value
self.window.set(self.current)
def start(self):
self.set(0.0)
self.refresh()
def end(self):
self.window.set(1.0)
self.window.refresh()
def pop(self):
self.window.pop()
| Rogentos/legacy-anaconda | yuminstall.py | Python | gpl-2.0 | 77,924 |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
import datetime
import logging
import unittest
import clitests
from abrtcli.match import (get_match_data,
match_completer,
match_get_problems,
match_lookup)
from abrtcli.utils import captured_output
class MatchTestCase(clitests.TestCase):
'''
Simple test to check if database creation & access
works as expected.
'''
hashes = ['ccacca5', 'bc60a5c', 'acbea5c', 'ffe635c']
collision_hash = 'bc60a5c'
human = ['/home/user/bin/user_app', 'unknown_problem', 'polkitd']
collision_human = 'pavucontrol'
combined = ['pavucontrol@bc60a5c', 'pavucontrol@acbea5c']
paths = [
'/var/tmp/abrt/ccpp-2015-03-16-14:41:47-7729',
'/var/tmp/abrt/ccpp-2015-06-16-14:41:47-7729',
'/var/tmp/abrt/ccpp-2015-05-16-14:41:47-7729',
'/var/tmp/abrt/ccpp-2014-03-16-14:41:47-7729',
]
def test_get_match_data(self):
'''
Test get_match_data returns correctly merged data
'''
by_human_id, by_short_id, by_path = get_match_data()
self.assertEqual(len(by_human_id), 4)
self.assertEqual(len(by_short_id), 4)
self.assertEqual(len(by_path), 4)
def test_match_completer(self):
'''
Test that match_completer yields properly formatted candidates
'''
pm = match_completer(None, None)
self.assertEqual(set(pm), set(self.hashes + self.human + self.combined + self.paths))
def test_match_get_problems(self):
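        # An unknown pattern should make the lookup helper exit with an error.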
with self.assertRaises(SystemExit):
match_get_problems(['adun toridas'])
def test_match_combo(self):
'''
Test matching based on combinations of criteria
'''
since = datetime.datetime(2015, 5, 1)
until = datetime.datetime(2015, 7, 1)
matches = match_lookup(['bc60a5cbddb4e3667511e718ceecac16133acc97'],
since=since.timestamp(),
until=until.timestamp(),
not_reported=True)
self.assertEqual(len(matches), 1)
matches = match_lookup(['bc60a5cbddb4e3667511e718ceecac16133acc97'],
since=since.timestamp(),
until=until.timestamp())
self.assertEqual(len(matches), 2)
matches = match_lookup(['bc60a5cbddb4e3667511e718ceecac16133acc97'],
components=['pavucontrol'],
since=since.timestamp(),
n_latest=1)
self.assertEqual(len(matches), 1)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
unittest.main()
| mkutlak/abrt | src/cli/tests/test_match.py | Python | gpl-2.0 | 2,765 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Utilities for interaction with SVN repositories"""
import errno
from os import chdir
from subprocess import check_call as call
from invenio.base.globals import cfg
import tarfile
from tempfile import mkdtemp
from shutil import rmtree
def svn_exists():
"""
Returns True if SVN is installed, else False
"""
if cfg['CFG_PATH_SVN']:
return True
else:
return False
def get_which_svn():
"""
Gets which SVN is being used
:returns: path to SVN
"""
return cfg['CFG_PATH_SVN']
def harvest_repo(root_url, archive_path, tag=None, archive_mode='w:gz'):
"""
Archives a specific tag in a specific SVN repository.
:param root_url: This is the root url of the repo and should end in the
repo name.
:param archive_path: Where the archive will be stored - Must end in a
valid extension that matches the archive_mode type. Default requires
'tar.gz'
:param tag: This is the tag you want to harvest, None=HEAD
:param archive_mode: See 'tarfile.open' modes default w:gz > tar.gz
"""
if not svn_exists():
raise Exception("SVN not found. It probably needs installing.")
clone_path = mkdtemp(dir=cfg['CFG_TMPDIR'])
svn = get_which_svn()
if tag:
call([svn, 'co', root_url + '/tags/' + tag, clone_path])
else:
call([svn, 'co', root_url + '/trunk/', clone_path])
chdir(cfg['CFG_TMPDIR'])
tar = tarfile.open(name=archive_path, mode=archive_mode)
tar.add(clone_path, arcname=root_url.split('/').pop())
tar.close()
try:
rmtree(clone_path)
except OSError as e:
# Reraise unless ENOENT: No such file or directory
# (ok if directory has already been deleted)
if e.errno != errno.ENOENT:
raise
| MSusik/invenio | invenio/utils/vcs/svn.py | Python | gpl-2.0 | 2,585 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0005_event_outcome_reason'),
]
operations = [
migrations.CreateModel(
name='RelatedEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('event', models.ForeignKey(related_query_name=b'this_event', related_name='these_events', to='events.Event', null=True)),
('related', models.ForeignKey(related_query_name=b'related', related_name='relates', to='events.Event', null=True)),
],
),
]
| JulianVolodia/Politikon | events/migrations/0006_relatedevent.py | Python | gpl-2.0 | 749 |
"""Default tags used by the template system, available to all templates."""
from __future__ import unicode_literals
import os
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
import warnings
from django.conf import settings
from django.template.base import (Node, NodeList, Template, Context, Library,
TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,
BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,
SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re,
render_value_in_context)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_text, smart_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils import six
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CsrfTokenNode(Node):
def render(self, context):
csrf_token = context.get('csrf_token', None)
if csrf_token:
if csrf_token == 'NOTPROVIDED':
return format_html("")
else:
return format_html("<input type='hidden' name='csrfmiddlewaretoken' value='{0}' />", csrf_token)
else:
# It's very probable that the token is missing because of
# misconfiguration, so we raise a warning
if settings.DEBUG:
warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.")
return ''
class CycleNode(Node):
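    """Implements the actions of the cycle tag."""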
def __init__(self, cyclevars, variable_name=None, silent=False, escape=False):
self.cyclevars = cyclevars
self.variable_name = variable_name
self.silent = silent
self.escape = escape # only while the "future" version exists
def render(self, context):
if self not in context.render_context:
# First time the node is rendered in template
context.render_context[self] = itertools_cycle(self.cyclevars)
cycle_iter = context.render_context[self]
value = next(cycle_iter).resolve(context)
if self.variable_name:
context[self.variable_name] = value
if self.silent:
return ''
if not self.escape:
value = mark_safe(value)
return render_value_in_context(value, context)
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
with context.push(var=output):
return self.filter_expr.resolve(context)
class FirstOfNode(Node):
def __init__(self, variables, escape=False):
self.vars = variables
self.escape = escape # only while the "future" version exists
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
if not self.escape:
value = mark_safe(value)
return render_value_in_context(value, context)
return ''
class ForNode(Node):
child_nodelists = ('nodelist_loop', 'nodelist_empty')
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = ' reversed' if self.is_reversed else ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
for node in self.nodelist_empty:
yield node
def render(self, context):
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
with context.push():
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if len_values < 1:
return self.nodelist_empty.render(context)
nodelist = []
if self.is_reversed:
values = reversed(values)
unpack = len(self.loopvars) > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i + 1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
try:
unpacked_vars = dict(zip(self.loopvars, item))
except TypeError:
pass
else:
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
# In TEMPLATE_DEBUG mode provide source of the node which
# actually raised the exception
if settings.TEMPLATE_DEBUG:
for node in self.nodelist_loop:
try:
nodelist.append(node.render(context))
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
else:
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if pop_context:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
return mark_safe(''.join(force_text(n) for n in nodelist))
class IfChangedNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._varlist = varlist
def render(self, context):
# Init state storage
state_frame = self._get_context_stack_frame(context)
if self not in state_frame:
state_frame[self] = None
nodelist_true_output = None
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context, True) for var in self._varlist]
else:
# The "{% ifchanged %}" syntax (without any variables) compares the rendered output.
compare_to = nodelist_true_output = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != state_frame[self]:
state_frame[self] = compare_to
return nodelist_true_output or self.nodelist_true.render(context) # render true block if not already rendered
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
def _get_context_stack_frame(self, context):
# The Context object behaves like a stack where each template tag can create a new scope.
# Find the place where to store the state to detect changes.
if 'forloop' in context:
# Ifchanged is bound to the local for loop.
# When there is a loop-in-loop, the state is bound to the inner loop,
# so it resets when the outer loop continues.
return context['forloop']
else:
# Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'.
return context.render_context
class IfEqualNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
val1 = self.var1.resolve(context, True)
val2 = self.var2.resolve(context, True)
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def resolve_expression(self, obj, context):
# This method is called for each object in self.target. See regroup()
# for the reason why we temporarily put the object in the context.
context[self.var_name] = obj
return self.expression.resolve(context, True)
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
]
return ''
def include_is_allowed(filepath):
filepath = os.path.abspath(filepath)
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath = filepath
self.parsed = parsed
def render(self, context):
filepath = self.filepath.resolve(context)
if not include_is_allowed(filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
with open(filepath, 'r') as fp:
output = fp.read()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=filepath)
return t.render(context)
except TemplateSyntaxError as e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
return date(datetime.now(tz=tzinfo), self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs, asvar):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict((smart_text(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items())
view_name = self.view_name.resolve(context)
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse(view_name, args=args, kwargs=kwargs, current_app=context.current_app)
except NoReverseMatch:
exc_info = sys.exc_info()
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
six.reraise(*exc_info)
else:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
class VerbatimNode(Node):
def __init__(self, content):
self.content = content
def render(self, context):
return self.content
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width, asvar=None):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
self.asvar = asvar
def render(self, context):
try:
value = self.val_expr.resolve(context)
max_value = self.max_expr.resolve(context)
max_width = int(self.max_width.resolve(context))
except VariableDoesNotExist:
return ''
except (ValueError, TypeError):
raise TemplateSyntaxError("widthratio final argument must be a number")
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
result = str(int(round(ratio)))
except ZeroDivisionError:
return '0'
except (ValueError, TypeError, OverflowError):
return ''
if self.asvar:
context[self.asvar] = result
return ''
else:
return result
class WithNode(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<WithNode>"
def render(self, context):
values = dict((key, val.resolve(context)) for key, val in
six.iteritems(self.extra_context))
with context.push(**values):
return self.nodelist.render(context)
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
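    Sample usage (``somevariable`` here is just a placeholder for any context
    variable)::
        {% autoescape on %}
            {{ somevariable }}
        {% endautoescape %}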
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in ('on', 'off'):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
@register.tag
def cycle(parser, token, escape=False):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% for o in some_list %}
{% cycle 'row1' 'row2' as rowcolors silent %}
<tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
{% endfor %}
"""
if not escape:
warnings.warn(
"'The `cycle` template tag is changing to escape its arguments; "
"the non-autoescaping version is deprecated. Load it "
"from the `future` tag library to start using the new behavior.",
RemovedInDjango18Warning, stacklevel=2)
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
    # *all* templates).
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if name not in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent, escape=escape)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values, escape=escape)
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
filter_name = getattr(func, '_filter_name', None)
if filter_name in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % filter_name)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token, escape=False):
"""
Outputs the first variable passed that is not False, without escaping.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1|safe }}
{% elif var2 %}
{{ var2|safe }}
{% elif var3 %}
{{ var3|safe }}
{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to escape the output, use a filter tag::
{% filter force_escape %}
{% firstof var1 var2 var3 "fallback value" %}
{% endfilter %}
"""
if not escape:
warnings.warn(
"'The `firstof` template tag is changing to escape its arguments; "
"the non-autoescaping version is deprecated. Load it "
"from the `future` tag library to start using the new behavior.",
RemovedInDjango18Warning, stacklevel=2)
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
return FirstOfNode([parser.compile_filter(bit) for bit in bits], escape=escape)
@register.tag('for')
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
        </ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
          {% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.split_contents()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = -3 if is_reversed else -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index + 1])
nodelist_loop = parser.parse(('empty', 'endfor',))
token = parser.next_token()
if token.contents == 'empty':
nodelist_empty = parser.parse(('endfor',))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
val1 = parser.compile_filter(bits[1])
val2 = parser.compile_filter(bits[2])
return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)
@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
error_class = TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag('if')
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% elif athlete_in_locker_room_list %}
Athletes should be out of the locker room soon!
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
As you can see, the ``if`` tag may take one or several `` {% elif %}``
clauses, as well as an ``{% else %}`` clause that will be displayed if all
previous conditions fail. These clauses are optional.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
Comparison operators are also available, and the use of filters is also
allowed, for example::
{% if articles|length >= 5 %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid if tag.
All supported operators are: ``or``, ``and``, ``in``, ``not in``
``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
Operator precedence follows Python.
"""
# {% if ... %}
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith('elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == 'else':
nodelist = parser.parse(('endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == 'endif'
return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The ``{% ifchanged %}`` block tag is used within a loop. It has two
possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given one or more variables, check whether any variable has changed.
For example, the following shows the date every time it changes, while
showing the hour if either the hour or the date has changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.split_contents()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
values = [parser.compile_filter(bit) for bit in bits[1:]]
return IfChangedNode(nodelist_true, nodelist_false, *values)
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
bits = token.split_contents()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed)
@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
Can also be used to load an individual tag/filter from
a library::
{% load byline from news %}
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
try:
taglib = bits[-1]
lib = get_library(taglib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
else:
temp_lib = Library()
for name in bits[1:-2]:
if name in lib.tags:
temp_lib.tags[name] = lib.tags[name]
# a name could be a tag *and* a filter, so check for both
if name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
elif name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
else:
raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" %
(name, taglib))
parser.add_library(temp_lib)
else:
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string)
@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
        * Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
bits = token.split_contents()
if len(bits) != 6:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(bits[1])
if bits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
if bits[4] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
var_name = bits[5]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
expression = parser.compile_filter(var_name +
VARIABLE_ATTRIBUTE_SEPARATOR +
bits[3])
return RegroupNode(target, expression, var_name)
@register.tag
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute Python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project.
Other arguments are space-separated values that will be filled in place of
positional and keyword arguments in the URL. Don't mix positional and
keyword arguments.
All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument can also be a named URL instead of the Python path to
the view callable. For example if the URLconf entry looks like this::
url('^client/(\d+)/$', name='client-detail-view')
then in the template you can use::
{% url "client-detail-view" client.id %}
There is even another possible value type for the first argument. It can be
the name of a template variable that will be evaluated to obtain the view
name or the URL name, e.g.::
{% with view_path="app_name.client" %}
{% url view_path client.id %}
{% endwith %}
or,
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
viewname = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
@register.tag
def verbatim(parser, token):
"""
Stops the template engine from rendering the contents of this block tag.
Usage::
{% verbatim %}
{% don't process this %}
{% endverbatim %}
You can also designate a specific closing tag block (allowing the
unrendered use of ``{% endverbatim %}``)::
{% verbatim myblock %}
...
{% endverbatim myblock %}
"""
nodelist = parser.parse(('endverbatim',))
parser.delete_first_token()
return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src="bar.png" alt="Bar"
height="10" width="{% widthratio this_value max_value max_width %}" />
If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
the image in the above example will be 88 pixels wide
(because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
In some cases you might want to capture the result of widthratio in a
variable. It can be useful for instance in a blocktrans like this::
{% widthratio this_value max_value max_width as width %}
{% blocktrans %}The width is: {{ width }}{% endblocktrans %}
"""
bits = token.split_contents()
if len(bits) == 4:
tag, this_value_expr, max_value_expr, max_width = bits
asvar = None
elif len(bits) == 6:
tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits
if as_ != 'as':
raise TemplateSyntaxError("Invalid syntax in widthratio tag. Expecting 'as' keyword")
else:
raise TemplateSyntaxError("widthratio takes at least three arguments")
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width),
asvar=asvar)
@register.tag('with')
def do_with(parser, token):
"""
Adds one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError("%r expected at least one variable "
"assignment" % bits[0])
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
| 912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/template/defaulttags.py | Python | gpl-2.0 | 50,803 |
# -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.wait import TimedOutError
pytestmark = [
pytest.mark.tier(2),
test_requirements.general_ui,
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider([CloudProvider], required_fields=['remove_test'], scope="module")
]
@pytest.fixture()
def set_grid(appliance):
view = navigate_to(appliance.collections.cloud_images, 'All')
view.toolbar.view_selector.select('Grid View')
yield
view = navigate_to(appliance.collections.cloud_images, 'All')
view.toolbar.view_selector.select('List View')
def test_delete_instance_appear_after_refresh(appliance, provider):
""" Tests delete instance
Metadata:
test_flag: delete_object
Polarion:
assignee: mmojzis
casecomponent: WebUI
initialEstimate: 1/4h
"""
instance_name = provider.data['remove_test']['instance']
test_instance = appliance.collections.cloud_instances.instantiate(instance_name, provider)
test_instance.delete(from_details=False)
test_instance.wait_for_delete()
provider.refresh_provider_relationships()
test_instance.wait_to_appear()
def test_delete_image_appear_after_refresh(appliance, provider, set_grid, request):
""" Tests delete image
Metadata:
test_flag: delete_object
Polarion:
assignee: mmojzis
casecomponent: WebUI
caseimportance: medium
initialEstimate: 1/10h
"""
image_name = provider.data['remove_test']['image']
test_image = appliance.collections.cloud_images.instantiate(image_name, provider)
test_image.delete(from_details=False)
test_image.wait_for_delete()
provider.refresh_provider_relationships()
test_image.wait_to_appear()
def test_delete_stack_appear_after_refresh(appliance, provider, provisioning,
request):
""" Tests delete stack
Metadata:
test_flag: delete_object
Polarion:
assignee: mmojzis
casecomponent: WebUI
initialEstimate: 1/4h
"""
stack = appliance.collections.cloud_stacks.instantiate(name=provisioning['stacks'][0],
provider=provider)
# wait for delete implemented in delete()
stack.delete()
# refresh relationships is implemented in wait_for_exists()
try:
stack.wait_for_exists()
except TimedOutError:
pytest.fail("stack didn't appear after refresh")
| Yadnyawalkya/integration_tests | cfme/tests/cloud/test_delete_cloud_object.py | Python | gpl-2.0 | 2,638 |
def main():
    # Project Euler problem 10: sum of all primes below two million,
    # computed with a sieve of Eratosthenes.
    n = 2000000
    total = 0
    prime = [True] * n
    for p in range(2, n):
        if prime[p]:
            total += p
            # Mark every multiple of p from p*p onwards as composite.
            for i in range(p * p, n, p):
                prime[i] = False
    print(total)
if __name__ == "__main__":
main()
| OpenGenus/cosmos | code/online_challenges/src/project_euler/problem_010/problem_010.py | Python | gpl-3.0 | 262 |
# Copyright (c) 2012 The Regents of The University of Michigan
# Copyright (c) 2016 Centre National de la Recherche Scientifique
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Anastasiia Butko
# Louisa Bessad
from m5.objects import *
#-----------------------------------------------------------------------
# ex5 LITTLE core (based on the ARM Cortex-A7)
#-----------------------------------------------------------------------
# Simple ALU Instructions have a latency of 3
class ex5_LITTLE_Simple_Int(MinorDefaultIntFU):
opList = [ OpDesc(opClass='IntAlu', opLat=4) ]
# Complex ALU instructions have variable latencies
class ex5_LITTLE_Complex_IntMul(MinorDefaultIntMulFU):
opList = [ OpDesc(opClass='IntMult', opLat=7) ]
class ex5_LITTLE_Complex_IntDiv(MinorDefaultIntDivFU):
opList = [ OpDesc(opClass='IntDiv', opLat=9) ]
# Floating point and SIMD instructions
class ex5_LITTLE_FP(MinorDefaultFloatSimdFU):
opList = [ OpDesc(opClass='SimdAdd', opLat=6),
OpDesc(opClass='SimdAddAcc', opLat=4),
OpDesc(opClass='SimdAlu', opLat=4),
OpDesc(opClass='SimdCmp', opLat=1),
OpDesc(opClass='SimdCvt', opLat=3),
OpDesc(opClass='SimdMisc', opLat=3),
OpDesc(opClass='SimdMult',opLat=4),
OpDesc(opClass='SimdMultAcc',opLat=5),
OpDesc(opClass='SimdShift',opLat=3),
OpDesc(opClass='SimdShiftAcc', opLat=3),
OpDesc(opClass='SimdSqrt', opLat=9),
OpDesc(opClass='SimdFloatAdd',opLat=8),
OpDesc(opClass='SimdFloatAlu',opLat=6),
OpDesc(opClass='SimdFloatCmp', opLat=6),
OpDesc(opClass='SimdFloatCvt', opLat=6),
OpDesc(opClass='SimdFloatDiv', opLat=20, pipelined=False),
OpDesc(opClass='SimdFloatMisc', opLat=6),
OpDesc(opClass='SimdFloatMult', opLat=15),
OpDesc(opClass='SimdFloatMultAcc',opLat=6),
OpDesc(opClass='SimdFloatSqrt', opLat=17),
OpDesc(opClass='FloatAdd', opLat=8),
OpDesc(opClass='FloatCmp', opLat=6),
OpDesc(opClass='FloatCvt', opLat=6),
OpDesc(opClass='FloatDiv', opLat=15, pipelined=False),
OpDesc(opClass='FloatSqrt', opLat=33),
OpDesc(opClass='FloatMult', opLat=6) ]
# Load/Store Units
class ex5_LITTLE_MemFU(MinorDefaultMemFU):
opList = [ OpDesc(opClass='MemRead',opLat=1),
OpDesc(opClass='MemWrite',opLat=1) ]
# Misc Unit
class ex5_LITTLE_MiscFU(MinorDefaultMiscFU):
opList = [ OpDesc(opClass='IprAccess',opLat=1),
OpDesc(opClass='InstPrefetch',opLat=1) ]
# Functional Units for this CPU
class ex5_LITTLE_FUP(MinorFUPool):
funcUnits = [ex5_LITTLE_Simple_Int(), ex5_LITTLE_Simple_Int(),
ex5_LITTLE_Complex_IntMul(), ex5_LITTLE_Complex_IntDiv(),
ex5_LITTLE_FP(), ex5_LITTLE_MemFU(),
ex5_LITTLE_MiscFU()]
class ex5_LITTLE(MinorCPU):
executeFuncUnits = ex5_LITTLE_FUP()
class L1Cache(Cache):
tag_latency = 2
data_latency = 2
response_latency = 2
tgts_per_mshr = 8
# Consider the L2 a victim cache also for clean lines
writeback_clean = True
class L1I(L1Cache):
mshrs = 2
size = '32kB'
assoc = 2
is_read_only = True
tgts_per_mshr = 20
class L1D(L1Cache):
mshrs = 4
size = '32kB'
assoc = 4
write_buffers = 4
# TLB Cache
# Use a cache as a L2 TLB
class WalkCache(Cache):
tag_latency = 2
data_latency = 2
response_latency = 2
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 2
write_buffers = 16
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# L2 Cache
class L2(Cache):
tag_latency = 9
data_latency = 9
response_latency = 9
mshrs = 8
tgts_per_mshr = 12
size = '512kB'
assoc = 8
write_buffers = 16
prefetch_on_access = True
clusivity = 'mostly_excl'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=1, latency = 1)
tags = BaseSetAssoc()
repl_policy = RandomRP()
| vineodd/PIMSim | GEM5Simulation/gem5/configs/common/cores/arm/ex5_LITTLE.py | Python | gpl-3.0 | 5,637 |
#!/usr/bin/python
# This example integrates Jupiter and Saturn in the Solar system for a variety of initial conditions.
# Alongside the normal equations of motions, IAS15 is used to integrate the variational equations.
# These can be used to measure the Mean Exponential Growth of Nearby Orbits (MEGNO), a chaos indicator.
# This example script runs N^2 simulations over a grid of initial conditions (N=100 below) and plots the MEGNO value. Values close to <Y>=2 correspond
# to regular quasi-periodic orbits. Higher values of <Y> correspond to chaotic orbits.
# Import matplotlib
import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Import the rebound module
import rebound
# Import other modules
import numpy as np
import multiprocessing
# Runs one simulation.
def simulation(par):
saturn_a, saturn_e = par
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.min_dt = 5.
sim.dt = 1.
# These parameters are only approximately those of Jupiter and Saturn.
sun = rebound.Particle(m=1.)
sim.add(sun)
jupiter = sim.add(primary=sun,m=0.000954, a=5.204, M=0.600, omega=0.257, e=0.048)
saturn = sim.add(primary=sun,m=0.000285, a=saturn_a, M=0.871, omega=1.616, e=saturn_e)
sim.move_to_com()
sim.init_megno(1e-16)
sim.integrate(1e3*2.*np.pi)
    return [sim.calculate_megno(), 1./(sim.calculate_lyapunov()*2.*np.pi)] # returns MEGNO and Lyapunov timescale in years
### Setup grid and run many simulations in parallel
N = 100 # Grid size, increase this number to see more detail
a = np.linspace(7.,10.,N) # range of saturn semi-major axis in AU
e = np.linspace(0.,0.5,N) # range of saturn eccentricity
parameters = []
for _e in e:
for _a in a:
parameters.append([_a,_e])
simulation((8,0.))
# Run simulations in parallel
pool = rebound.InterruptiblePool() # Number of threads defaults to the number of CPUs on the system
print("Running %d simulations on %d threads..." % (len(parameters), pool._processes))
res = np.nan_to_num(np.array(pool.map(simulation,parameters)))
megno = np.clip(res[:,0].reshape((N,N)),1.8,4.) # clip arrays to plot saturated
lyaptimescale = np.clip(np.absolute(res[:,1].reshape((N,N))),1e1,1e5)
### Create plot and save as pdf
# Setup plots
f, axarr = plt.subplots(2,figsize=(10,10))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
ax.set_xlim(extent[0],extent[1])
ax.set_ylim(extent[2],extent[3])
ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
ax.set_ylabel("$e_{\mathrm{Saturn}}$")
# Plot MEGNO
im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb1 = plt.colorbar(im1, ax=axarr[0])
cb1.solids.set_rasterized(True)
cb1.set_label("MEGNO $\\langle Y \\rangle$")
# Plot Lyapunov timescale
im2 = axarr[1].imshow(lyaptimescale, vmin=1e1, vmax=1e5, norm=LogNorm(), aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn", extent=extent)
cb2 = plt.colorbar(im2, ax=axarr[1])
cb2.solids.set_rasterized(True)
cb2.set_label("Lyapunov timescale [years]")
plt.savefig("megno.pdf")
### Automatically open plot (OSX only)
from sys import platform as _platform
if _platform == "darwin":
import os
os.system("open megno.pdf")
| dchandan/rebound | python_examples/megno/problem.py | Python | gpl-3.0 | 3,314 |
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata("eth_keyfile")
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-eth_keyfile.py | Python | gpl-3.0 | 517 |
import sys, math
| pyta-uoft/pyta | examples/pylint/c0410_multiple_imports.py | Python | gpl-3.0 | 17 |
"""
Widget meta description classes
===============================
"""
import os
import sys
import warnings
# Exceptions
class DescriptionError(Exception):
pass
class WidgetSpecificationError(DescriptionError):
pass
class SignalSpecificationError(DescriptionError):
pass
class CategorySpecificationError(DescriptionError):
pass
###############
# Channel flags
###############
# A single signal
Single = 2
# Multiple signal (more then one input on the channel)
Multiple = 4
# Default signal (default or primary input/output)
Default = 8
NonDefault = 16
# Explicit - only connected if specifically requested or the only possibility
Explicit = 32
# Dynamic type output signal
Dynamic = 64
# Input/output signal (channel) description
class InputSignal(object):
"""
Description of an input channel.
Parameters
----------
name : str
Name of the channel.
type : str or `type`
Type of the accepted signals.
handler : str
Name of the handler method for the signal.
flags : int, optional
Channel flags.
id : str
A unique id of the input signal.
doc : str, optional
A docstring documenting the channel.
"""
def __init__(self, name, type, handler, flags=Single + NonDefault,
id=None, doc=None):
self.name = name
self.type = type
self.handler = handler
self.id = id
self.doc = doc
if isinstance(flags, str):
# flags are stored as strings
warnings.warn("Passing 'flags' as string is deprecated, use "
"integer constants instead",
PendingDeprecationWarning)
flags = eval(flags)
if not (flags & Single or flags & Multiple):
flags += Single
if not (flags & Default or flags & NonDefault):
flags += NonDefault
self.single = flags & Single
self.default = flags & Default
self.explicit = flags & Explicit
self.flags = flags
def __str__(self):
fmt = ("{0.__name__}(name={name!r}, type={type!s}, "
"handler={handler}, ...)")
return fmt.format(type(self), **self.__dict__)
def input_channel_from_args(args):
if isinstance(args, tuple):
return InputSignal(*args)
elif isinstance(args, InputSignal):
return args
else:
raise TypeError("invalid declaration of widget input signal")
class OutputSignal(object):
"""
Description of an output channel.
Parameters
----------
name : str
Name of the channel.
type : str or `type`
Type of the output signals.
flags : int, optional
Channel flags.
id : str
A unique id of the output signal.
doc : str, optional
A docstring documenting the channel.
"""
def __init__(self, name, type, flags=Single + NonDefault,
id=None, doc=None):
self.name = name
self.type = type
self.id = id
self.doc = doc
if isinstance(flags, str):
# flags are stored as strings
warnings.warn("Passing 'flags' as string is deprecated, use "
"integer constants instead",
PendingDeprecationWarning)
flags = eval(flags)
if not (flags & Single or flags & Multiple):
flags += Single
if not (flags & Default or flags & NonDefault):
flags += NonDefault
self.single = flags & Single
self.default = flags & Default
self.explicit = flags & Explicit
self.dynamic = flags & Dynamic
self.flags = flags
if self.dynamic and not self.single:
raise SignalSpecificationError(
"Output signal can not be 'Multiple' and 'Dynamic'."
)
def __str__(self):
fmt = ("{0.__name__}(name={name!r}, type={type!s}, "
"...)")
return fmt.format(type(self), **self.__dict__)
def output_channel_from_args(args):
if isinstance(args, tuple):
return OutputSignal(*args)
elif isinstance(args, OutputSignal):
return args
else:
raise TypeError("invalid declaration of widget output signal")
class WidgetDescription(object):
"""
Description of a widget.
Parameters
----------
name : str
A human readable name of the widget.
id : str
A unique identifier of the widget (in most situations this should
be the full module name).
category : str, optional
A name of the category in which this widget belongs.
version : str, optional
Version of the widget. By default the widget inherits the project
version.
description : str, optional
A short description of the widget, suitable for a tool tip.
long_description : str, optional
A longer description of the widget, suitable for a 'what's this?'
role.
qualified_name : str
A qualified name (import name) of the class implementing the widget.
package : str, optional
A package name where the widget is implemented.
project_name : str, optional
The distribution name that provides the widget.
inputs : list of :class:`InputSignal`, optional
A list of input channels provided by the widget.
outputs : list of :class:`OutputSignal`, optional
A list of output channels provided by the widget.
help : str, optional
        URL or a Resource template of a detailed widget help page.
help_ref : str, optional
A text reference id that can be used to identify the help
page, for instance an intersphinx reference.
author : str, optional
Author name.
author_email : str, optional
Author email address.
maintainer : str, optional
Maintainer name
maintainer_email : str, optional
Maintainer email address.
keywords : list-of-str, optional
A list of keyword phrases.
priority : int, optional
Widget priority (the order of the widgets in a GUI presentation).
icon : str, optional
A filename of the widget icon (in relation to the package).
background : str, optional
Widget's background color (in the canvas GUI).
replaces : list-of-str, optional
A list of `id`s this widget replaces (optional).
"""
def __init__(self, name, id, category=None, version=None,
description=None, long_description=None,
qualified_name=None, package=None, project_name=None,
inputs=[], outputs=[],
author=None, author_email=None,
maintainer=None, maintainer_email=None,
help=None, help_ref=None, url=None, keywords=None,
priority=sys.maxsize,
icon=None, background=None,
replaces=None,
):
if not qualified_name:
# TODO: Should also check that the name is real.
raise ValueError("'qualified_name' must be supplied.")
self.name = name
self.id = id
self.category = category
self.version = version
self.description = description
self.long_description = long_description
self.qualified_name = qualified_name
self.package = package
self.project_name = project_name
self.inputs = inputs
self.outputs = outputs
self.help = help
self.help_ref = help_ref
self.author = author
self.author_email = author_email
self.maintainer = maintainer
self.maintainer_email = maintainer_email
self.url = url
self.keywords = keywords
self.priority = priority
self.icon = icon
self.background = background
self.replaces = replaces
def __str__(self):
return ("WidgetDescription(name=%(name)r, id=%(id)r), "
"category=%(category)r, ...)") % self.__dict__
def __repr__(self):
return self.__str__()
@classmethod
def from_file(cls, filename, import_name=None):
"""
Widget description from old style (2.5 version) widget
descriptions.
"""
from Orange.orng.widgetParser import WidgetMetaData
from ..orngSignalManager import resolveSignal
rest, ext = os.path.splitext(filename)
if ext in [".pyc", ".pyo"]:
filename = filename[:-1]
        with open(filename, "rb") as f:
            contents = f.read()
dirname, basename = os.path.split(filename)
default_cat = os.path.basename(dirname)
try:
meta = WidgetMetaData(contents, default_cat)
except Exception as ex:
if "Not an Orange widget module." in str(ex):
raise WidgetSpecificationError
else:
raise
widget_name, ext = os.path.splitext(basename)
if import_name is None:
import_name = widget_name
wmod = __import__(import_name, fromlist=[""])
qualified_name = "%s.%s" % (import_name, widget_name)
inputs = eval(meta.inputList)
outputs = eval(meta.outputList)
inputs = map(input_channel_from_args, inputs)
outputs = map(output_channel_from_args, outputs)
# Resolve signal type names into concrete type instances
inputs = [resolveSignal(input, globals=wmod.__dict__)
for input in inputs]
outputs = [resolveSignal(output, globals=wmod.__dict__)
for output in outputs]
# Convert all signal types back into qualified names.
# This is to prevent any possible import problems when cached
# descriptions are unpickled (the relevant code using this lists
# should be able to handle missing types better).
for s in inputs + outputs:
s.type = "%s.%s" % (s.type.__module__, s.type.__name__)
desc = WidgetDescription(
name=meta.name,
id=qualified_name,
category=meta.category,
description=meta.description,
qualified_name=qualified_name,
package=wmod.__package__,
keywords=meta.tags,
inputs=inputs,
outputs=outputs,
icon=meta.icon,
priority=int(meta.priority)
)
return desc
@classmethod
def from_module(cls, module):
"""
Get the widget description from a module.
        The module is searched for a widget class (a class created through
        `WidgetMetaClass` with a non-empty `name`), from which the
        description fields are taken.
Parameters
----------
module : `module` or str
A module to inspect for widget description. Can be passed
as a string (qualified import name).
"""
if isinstance(module, str):
module = __import__(module, fromlist=[""])
module_name = module.__name__.rsplit(".", 1)[-1]
if module.__package__:
package_name = module.__package__.rsplit(".", 1)[-1]
else:
package_name = None
default_cat_name = package_name if package_name else ""
from Orange.widgets.widget import WidgetMetaClass
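        # Scan the module namespace for the first class created through
        # WidgetMetaClass that defines a non-empty name; fail with
        # WidgetSpecificationError when no such class is present.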
for widget_cls_name, widget_class in module.__dict__.items():
if (isinstance(widget_class, WidgetMetaClass) and
widget_class.name):
break
else:
raise WidgetSpecificationError
qualified_name = "%s.%s" % (module.__name__, widget_cls_name)
# Convert all signal types into qualified names.
# This is to prevent any possible import problems when cached
# descriptions are unpickled (the relevant code using this lists
# should be able to handle missing types better).
for s in widget_class.inputs + widget_class.outputs:
s.type = "%s.%s" % (s.type.__module__, s.type.__name__)
return cls(
name=widget_class.name,
id=widget_class.id or module_name,
category=widget_class.category or default_cat_name,
version=widget_class.version,
description=widget_class.description,
long_description=widget_class.long_description,
qualified_name=qualified_name,
package=module.__package__,
inputs=widget_class.inputs,
outputs=widget_class.outputs,
author=widget_class.author,
author_email=widget_class.author_email,
maintainer=widget_class.maintainer,
maintainer_email=widget_class.maintainer_email,
help=widget_class.help,
help_ref=widget_class.help_ref,
url=widget_class.url,
keywords=widget_class.keywords,
priority=widget_class.priority,
icon=widget_class.icon,
background=widget_class.background,
replaces=widget_class.replaces)
class CategoryDescription(object):
"""
Description of a widget category.
Parameters
----------
name : str
A human readable name.
version : str, optional
Version string.
description : str, optional
A short description of the category, suitable for a tool tip.
long_description : str, optional
A longer description.
qualified_name : str,
Qualified name
project_name : str
A project name providing the category.
priority : int
Priority (order in the GUI).
icon : str
An icon filename (a resource name retrievable using `pkg_resources`
relative to `qualified_name`).
background : str
        A background color for widgets in this category.
"""
def __init__(self, name=None, version=None,
description=None, long_description=None,
qualified_name=None, package=None,
project_name=None, author=None, author_email=None,
maintainer=None, maintainer_email=None,
url=None, help=None, keywords=None,
widgets=None, priority=sys.maxsize,
icon=None, background=None
):
self.name = name
self.version = version
self.description = description
self.long_description = long_description
self.qualified_name = qualified_name
self.package = package
self.project_name = project_name
self.author = author
self.author_email = author_email
self.maintainer = maintainer
self.maintainer_email = maintainer_email
self.url = url
self.help = help
self.keywords = keywords
self.widgets = widgets or []
self.priority = priority
self.icon = icon
self.background = background
def __str__(self):
return "CategoryDescription(name=%(name)r, ...)" % self.__dict__
def __repr__(self):
return self.__str__()
@classmethod
def from_package(cls, package):
"""
Get the CategoryDescription from a package.
Parameters
----------
package : `module` or `str`
A package containing the category.
"""
if isinstance(package, str):
package = __import__(package, fromlist=[""])
package_name = package.__name__
qualified_name = package_name
default_name = package_name.rsplit(".", 1)[-1]
name = getattr(package, "NAME", default_name)
description = getattr(package, "DESCRIPTION", None)
long_description = getattr(package, "LONG_DESCRIPTION", None)
author = getattr(package, "AUTHOR", None)
author_email = getattr(package, "AUTHOR_EMAIL", None)
maintainer = getattr(package, "MAINTAINER", None)
maintainer_email = getattr(package, "MAINTAINER_MAIL", None)
url = getattr(package, "URL", None)
help = getattr(package, "HELP", None)
keywords = getattr(package, "KEYWORDS", None)
widgets = getattr(package, "WIDGETS", None)
priority = getattr(package, "PRIORITY", sys.maxsize - 1)
icon = getattr(package, "ICON", None)
background = getattr(package, "BACKGROUND", None)
if priority == sys.maxsize - 1 and name.lower() == "prototypes":
priority = sys.maxsize
return CategoryDescription(
name=name,
qualified_name=qualified_name,
description=description,
long_description=long_description,
help=help,
author=author,
author_email=author_email,
maintainer=maintainer,
maintainer_email=maintainer_email,
url=url,
keywords=keywords,
widgets=widgets,
priority=priority,
icon=icon,
background=background)
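# Illustrative usage sketch (not part of the original module; the add-on
# module paths below are hypothetical):
#
#     desc = WidgetDescription.from_module("orangecontrib.example.owexample")
#     cat = CategoryDescription.from_package("orangecontrib.example")
#
# from_module raises WidgetSpecificationError when the module does not
# define a widget class.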
| PythonCharmers/orange3 | Orange/canvas/registry/description.py | Python | gpl-3.0 | 16,985 |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import Indicator, MovAv
class Trix(Indicator):
'''
    Defined by Jack Hutson in the 1980s, it shows the Rate of Change (%), or
    slope, of a triple exponentially smoothed moving average
Formula:
- ema1 = EMA(data, period)
- ema2 = EMA(ema1, period)
- ema3 = EMA(ema2, period)
- trix = 100 * (ema3 - ema3(-1)) / ema3(-1)
The final formula can be simplified to: 100 * (ema3 / ema3(-1) - 1)
    The moving average used defaults to the ExponentialMovingAverage, as in
    the classical definition, and can be changed with the `_movav` parameter
See:
- https://en.wikipedia.org/wiki/Trix_(technical_analysis)
- http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
'''
alias = ('TRIX',)
lines = ('trix',)
params = (('period', 15), ('_rocperiod', 1), ('_movav', MovAv.EMA),)
plotinfo = dict(plothlines=[0.0])
def _plotlabel(self):
plabels = [self.p.period]
plabels += [self.p._rocperiod] * self.p.notdefault('_rocperiod')
plabels += [self.p._movav] * self.p.notdefault('_movav')
return plabels
def __init__(self):
ema1 = self.p._movav(self.data, period=self.p.period)
ema2 = self.p._movav(ema1, period=self.p.period)
ema3 = self.p._movav(ema2, period=self.p.period)
# 1 period Percentage Rate of Change
self.lines.trix = 100.0 * (ema3 / ema3(-self.p._rocperiod) - 1.0)
super(Trix, self).__init__()
class TrixSignal(Trix):
'''
Extension of Trix with a signal line (ala MACD)
Formula:
- trix = Trix(data, period)
- signal = EMA(trix, sigperiod)
See:
- http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
'''
lines = ('signal',)
params = (('sigperiod', 9),)
def __init__(self):
super(TrixSignal, self).__init__()
self.l.signal = self.p._movav(self.lines[0], period=self.p.sigperiod)
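# Illustrative usage sketch (not part of the original module). Inside a
# backtrader strategy the indicators above are typically instantiated on a
# data feed; the strategy name and parameter values below are hypothetical:
#
#     class TrixStrategy(bt.Strategy):  # with: import backtrader as bt
#         def __init__(self):
#             self.trix = bt.indicators.Trix(self.data, period=15)
#             self.signal = bt.indicators.TrixSignal(self.data, sigperiod=9)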
| YuepengGuo/backtrader | backtrader/indicators/trix.py | Python | gpl-3.0 | 2,942 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import os
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
import pathlib
except ImportError:
pathlib = None
from io import open
import sys
try:
from thread import get_ident
except ImportError:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
__all__ = ['UserDict', 'OrderedDict', 'open']
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
native_str = str
str = type('str')
def from_none(exc):
"""raise from_none(ValueError('a')) == raise ValueError('a') from None"""
exc.__cause__ = None
exc.__suppress_context__ = True
return exc
# from reprlib 3.2.1
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
# from collections 3.2.1
class _ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
# can't use 'key in mapping' with defaultdict
return mapping[key]
except KeyError:
pass
# support subclasses that define __missing__
return self.__missing__(key)
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
# reuses stored hash values if possible
return len(set().union(*self.maps))
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
"""
New ChainMap or subclass with a new copy of
maps[0] and refs to maps[1:]
"""
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError(
'Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
"""
Remove and return an item pair from maps[0].
        Raise KeyError if maps[0] is empty.
"""
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
"""
Remove *key* from maps[0] and return its value.
Raise KeyError if *key* not in maps[0].
"""
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError(
'Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
try:
from collections import ChainMap
except ImportError:
ChainMap = _ChainMap
_ABC = getattr(
abc, 'ABC',
# Python 3.3 compatibility
abc.ABCMeta(
native_str('__ABC'),
(object,),
dict(__metaclass__=abc.ABCMeta),
),
)
class _PathLike(_ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
return bool(
hasattr(subclass, '__fspath__')
# workaround for Python 3.5
or pathlib and issubclass(subclass, pathlib.Path)
)
PathLike = getattr(os, 'PathLike', _PathLike)
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
if not hasattr(path, '__fspath__') and isinstance(path, pathlib.Path):
# workaround for Python 3.5
return str(path)
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
fspath = getattr(os, 'fspath', _fspath)
| pymedusa/SickRage | ext2/backports/configparser/helpers.py | Python | gpl-3.0 | 7,648 |
# SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0
import logging
import requests
from cachecontrol.adapter import CacheControlAdapter
from cachecontrol.cache import DictCache
from cachecontrol.controller import logger
from argparse import ArgumentParser
def setup_logging():
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
logger.addHandler(handler)
def get_session():
adapter = CacheControlAdapter(
DictCache(), cache_etags=True, serializer=None, heuristic=None
)
sess = requests.Session()
sess.mount("http://", adapter)
sess.mount("https://", adapter)
sess.cache_controller = adapter.controller
return sess
def get_args():
parser = ArgumentParser()
parser.add_argument("url", help="The URL to try and cache")
return parser.parse_args()
def main(args=None):
args = get_args()
sess = get_session()
# Make a request to get a response
resp = sess.get(args.url)
# Turn on logging
setup_logging()
# try setting the cache
sess.cache_controller.cache_response(resp.request, resp.raw)
# Now try to get it
if sess.cache_controller.cached_request(resp.request):
print("Cached!")
else:
print("Not cached :(")
if __name__ == "__main__":
main()
| SickGear/SickGear | lib/cachecontrol/_cmd.py | Python | gpl-3.0 | 1,326 |
import numpy as np
from gpaw.grid_descriptor import GridDescriptor
from gpaw.localized_functions import create_localized_functions
from gpaw.spline import Spline
s=Spline(0, 1.2, [1, 0.6, 0.1, 0.0])
a = 4.0
n = 24
gd = GridDescriptor((n, n, n), (a, a, a))
print(gd.get_boxes((0, 0, 0), 1.2, 0))
if 0:
p = create_localized_functions([s], gd, (0.0, 0.0, 0.0), cut=True)
a = np.zeros((n, n, n))
p.add(a, np.array([2.0]))
    print(a[1, 0])
| qsnake/gpaw | gpaw/test/gp2.py | Python | gpl-3.0 | 447 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision.datasets.fakedata import FakeData
from nupic.research.frameworks.self_supervised_learning.experiments import (
SelfSupervisedExperiment,
)
from nupic.research.frameworks.self_supervised_learning.utils import EncoderClassifier
class AutoEncoder(torch.nn.Module):
def __init__(self, input_dim=784, hidden_dim=20):
super().__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, input_dim)
def forward(self, x):
encoded = self.encode(x)
decoded = self.fc2(encoded).view(-1, 1, 28, 28)
return decoded
def encode(self, x):
x = x.flatten(start_dim=1)
encoded = self.fc1(x)
return encoded
class LinearClassifier(torch.nn.Module):
def __init__(self, input_dim=20, num_classes=10):
super().__init__()
self.fc = nn.Linear(input_dim, num_classes)
def forward(self, x):
out = self.fc(x)
return out
fake_data_args = dict(
size=1000, image_size=(1, 28, 28), num_classes=10, transform=transforms.ToTensor()
)
self_supervised_config = dict(
experiment_class=SelfSupervisedExperiment,
num_classes=10,
# Dataset
dataset_class=FakeData,
dataset_args=dict(
unsupervised=fake_data_args,
supervised=fake_data_args,
validation=fake_data_args,
),
# Number of epochs
epochs=5,
epochs_to_validate=[2, 4],
supervised_training_epochs_per_validation=1,
batch_size=32,
batch_size_supervised=32,
# Model class. Must inherit from "torch.nn.Module"
model_class=AutoEncoder,
# model model class arguments passed to the constructor
model_args=dict(),
optimizer_class=torch.optim.Adam,
optimizer_args=dict(lr=0.001),
classifier_config=dict(
model_class=LinearClassifier,
model_args=dict(),
optimizer_class=torch.optim.SGD,
optimizer_args=dict(lr=0.001),
loss_function=torch.nn.functional.cross_entropy,
),
loss_function=torch.nn.functional.mse_loss,
)
class SelfSupervisedLearningTest(unittest.TestCase):
"""
This is a test class for the `SelfSupervisedExperiment` class.
"""
def test_self_supervised_experiment(self):
# Setup experiment and initialize model.
exp = self_supervised_config["experiment_class"]()
exp.setup_experiment(self_supervised_config)
self.assertIsInstance(exp.encoder_classifier, EncoderClassifier)
self.assertTrue(hasattr(exp.encoder_classifier, "classifier"))
self.assertTrue(hasattr(exp.encoder_classifier, "encoder"))
# Loop through some pseudo epochs.
for _ in range(5):
exp.run_epoch()
if __name__ == "__main__":
unittest.main(verbosity=2)
| mrcslws/nupic.research | packages/self_supervised_learning/tests/unit/self_supervised_learning_test.py | Python | agpl-3.0 | 3,847 |
#-*- coding:utf-8 -*-
#
#
# Copyright (C) 2011 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime
from dateutil.relativedelta import relativedelta
from osv import fields, osv
from tools.translate import _
class wage_increment(osv.osv):
_name = 'hr.contract.wage.increment'
_description = 'HR Contract Wage Increment'
_columns = {
'effective_date': fields.date('Effective Date', required=True),
'wage': fields.float('Amount', digits=(16, 2)),
'contract_id': fields.many2one('hr.contract', 'Contract'),
}
def _get_contract_id(self, cr, uid, context=None):
        if context is None:
context = {}
return context.get('active_id', False)
_defaults = {'contract_id': _get_contract_id}
_rec_name = 'effective_date'
def action_wage_increment(self, cr, uid, ids, context=None):
hr_obj = self.pool.get('hr.contract')
# Copy the contract and adjust start/end dates and wage accordingly.
#
for wi in self.browse(cr, uid, ids, context=context):
data = hr_obj.copy_data(
cr, uid, wi.contract_id.id, context=context)
data['name'] = data['name'] + \
_(' - Wage Change ') + wi.effective_date
data['wage'] = wi.wage
data['date_start'] = wi.effective_date
c_id = hr_obj.create(cr, uid, data, context=context)
if c_id:
vals = {}
                # End the superseded contract the day before the new wage
                # takes effect.
                vals['date_end'] = datetime.strptime(
                    wi.effective_date, '%Y-%m-%d').date() - relativedelta(days=1)
hr_obj.write(cr, uid, wi.contract_id.id, vals, context=context)
return {'type': 'ir.actions.act_window_close'}
| bwrsandman/openerp-hr | hr_wage_increment/wizard/wage_increment.py | Python | agpl-3.0 | 2,472 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 jmesteve All Rights Reserved
# https://github.com/jmesteve
# <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_move_line
| jmesteve/openerpseda | openerp/addons_extra/account_move_line_extend/__init__.py | Python | agpl-3.0 | 1,071 |
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import sys
import urllib
import soaplib
from soaplib.core import namespaces
from lxml import etree
def create_relates_to_header(relatesTo, attrs={}):
'''Creates a 'relatesTo' header for async callbacks'''
relatesToElement = etree.Element(
'{%s}RelatesTo' % namespaces.ns_wsa)
for k, v in attrs.items():
relatesToElement.set(k, v)
relatesToElement.text = relatesTo
return relatesToElement
def create_callback_info_headers(message_id, reply_to):
'''Creates MessageId and ReplyTo headers for initiating an
async function'''
    # Use distinct local names so the element objects do not shadow the
    # string arguments passed in.
    message_id_element = etree.Element('{%s}MessageID' % namespaces.ns_wsa)
    message_id_element.text = message_id
    reply_to_element = etree.Element('{%s}ReplyTo' % namespaces.ns_wsa)
    address = etree.SubElement(reply_to_element, '{%s}Address' % namespaces.ns_wsa)
    address.text = reply_to
    return message_id_element, reply_to_element
def get_callback_info(request):
'''
Retrieves the messageId and replyToAddress from the message header.
This is used for async calls.
'''
message_id = None
reply_to_address = None
header = request.soap_req_header
if header:
headers = header.getchildren()
for header in headers:
if header.tag.lower().endswith("messageid"):
message_id = header.text
if header.tag.lower().find("replyto") != -1:
replyToElems = header.getchildren()
for replyTo in replyToElems:
if replyTo.tag.lower().endswith("address"):
reply_to_address = replyTo.text
return message_id, reply_to_address
def get_relates_to_info(request):
'''Retrieves the relatesTo header. This is used for callbacks'''
header = request.soap_req_header
if header:
headers = header.getchildren()
for header in headers:
if header.tag.lower().find('relatesto') != -1:
return header.text
def split_url(url):
'''Splits a url into (uri_scheme, host[:port], path)'''
scheme, remainder = urllib.splittype(url)
host, path = urllib.splithost(remainder)
return scheme.lower(), host, path
def reconstruct_url(environ):
'''
Rebuilds the calling url from values found in the
environment.
This algorithm was found via PEP 333, the wsgi spec and
contributed by Ian Bicking.
'''
url = environ['wsgi.url_scheme'] + '://'
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
if (urllib.quote(environ.get('SCRIPT_NAME', '')) == '/' and
urllib.quote(environ.get('PATH_INFO', ''))[0:1] == '/'):
#skip this if it is only a slash
pass
elif urllib.quote(environ.get('SCRIPT_NAME', ''))[0:2] == '//':
url += urllib.quote(environ.get('SCRIPT_NAME', ''))[1:]
else:
url += urllib.quote(environ.get('SCRIPT_NAME', ''))
url += urllib.quote(environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
def check_pyversion(*minversion):
return sys.version_info[:3] >= minversion
| soaplib/soaplib | src/soaplib/core/util/__init__.py | Python | lgpl-2.1 | 4,150 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyproj(PythonPackage):
"""Python interface to the PROJ.4 Library."""
homepage = "https://github.com/pyproj4/pyproj"
url = "https://pypi.io/packages/source/p/pyproj/pyproj-2.2.0.tar.gz"
git = "https://github.com/pyproj4/pyproj.git"
maintainers = ['citibeth', 'adamjstewart']
import_modules = ['pyproj']
version('2.6.0', sha256='977542d2f8cf2981cf3ad72cedfebcd6ac56977c7aa830d9b49fa7888b56e83d')
version('2.2.0', sha256='0a4f793cc93539c2292638c498e24422a2ec4b25cb47545addea07724b2a56e5')
version('2.1.3', sha256='99c52788b01a7bb9a88024bf4d40965c0a66a93d654600b5deacf644775f424d')
version('1.9.6', sha256='e0c02b1554b20c710d16d673817b2a89ff94738b0b537aead8ecb2edc4c4487b')
version('1.9.5.1', sha256='53fa54c8fa8a1dfcd6af4bf09ce1aae5d4d949da63b90570ac5ec849efaf3ea8')
depends_on('python@:2', when='@:1.9.5.1')
depends_on('python@3:', when='@2.3:')
depends_on('[email protected]:', when='@2.6.0:')
depends_on('py-setuptools', type='build')
depends_on('py-cython', type='build')
depends_on('[email protected]:', when='@2.6.0:')
depends_on('py-aenum', type=('build', 'run'), when='@2.2:^python@:3.5')
depends_on('proj')
depends_on('proj@:5', when='@:1')
depends_on('[email protected]:', when='@2.2:')
depends_on('[email protected]:', when='@2.0:')
def setup_build_environment(self, env):
env.set('PROJ_DIR', self.spec['proj'].prefix)
| iulian787/spack | var/spack/repos/builtin/packages/py-pyproj/package.py | Python | lgpl-2.1 | 1,652 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import time
from kubernetes_py.K8sExceptions import TimedOutException
from kubernetes_py.K8sObject import K8sObject
from kubernetes_py.models.v1.PersistentVolumeClaim import PersistentVolumeClaim
from kubernetes_py.models.v1.ResourceRequirements import ResourceRequirements
from kubernetes_py.models.v1beta1.LabelSelector import LabelSelector
from kubernetes_py.utils import is_valid_dict
READY_WAIT_TIMEOUT_SECONDS = 60
class K8sPersistentVolumeClaim(K8sObject):
def __init__(self, config=None, name=None):
super(K8sPersistentVolumeClaim, self).__init__(config=config, name=name, obj_type="PersistentVolumeClaim")
# ------------------------------------------------------------------------------------- api calls
def create(self):
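        # Creation is asynchronous on the cluster side; after submitting the
        # claim, block until its phase reports 'Bound' (or raise
        # TimedOutException after READY_WAIT_TIMEOUT_SECONDS).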
super(K8sPersistentVolumeClaim, self).create()
self._wait_for_available()
return self
def get(self):
self.model = PersistentVolumeClaim(self.get_model())
return self
def list(self, pattern=None, labels=None):
ls = super(K8sPersistentVolumeClaim, self).list(labels=labels)
claims = list(map(lambda x: PersistentVolumeClaim(x), ls))
if pattern is not None:
claims = list(filter(lambda x: pattern in x.name, claims))
k8s = []
for x in claims:
j = K8sPersistentVolumeClaim(config=self.config, name=x.name).from_model(m=x)
k8s.append(j)
return k8s
# ------------------------------------------------------------------------------------- wait
def _wait_for_available(self):
start_time = time.time()
while not self.model.status.phase == "Bound":
time.sleep(0.5)
self.get()
self._check_timeout(start_time)
def _check_timeout(self, start_time=None):
elapsed_time = time.time() - start_time
if elapsed_time >= READY_WAIT_TIMEOUT_SECONDS: # timeout
raise TimedOutException("Timed out waiting on readiness of PersistentVolumeClaim: [ {} ]".format(self.name))
# ------------------------------------------------------------------------------------- accessModes
@property
def access_modes(self):
return self.model.spec.access_modes
@access_modes.setter
def access_modes(self, modes=None):
self.model.spec.access_modes = modes
# ------------------------------------------------------------------------------------- resources
@property
def resources(self):
return self.model.spec.resources
@resources.setter
def resources(self, res=None):
if not is_valid_dict(res):
raise SyntaxError("K8sPersistentVolumeClaim: resources: [ {} ] is invalid.".format(res))
resources = ResourceRequirements(res)
self.model.spec.resources = resources
# ------------------------------------------------------------------------------------- selector
@property
def selector(self):
return self.model.spec.selector
@selector.setter
def selector(self, sel=None):
if not is_valid_dict(sel):
raise SyntaxError("K8sPersistentVolumeClaim: selector: [ {} ] is invalid.".format(sel))
selector = LabelSelector(sel)
self.model.spec.selector = selector
# ------------------------------------------------------------------------------------- storage_class_name
@property
def storage_class_name(self):
return self.model.spec.storage_class_name
@storage_class_name.setter
def storage_class_name(self, name=None):
self.model.spec.storage_class_name = name
| mnubo/kubernetes-py | kubernetes_py/K8sPersistentVolumeClaim.py | Python | apache-2.0 | 3,778 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import pyarrow as pa
from modelarts import manifest
from modelarts.field_name import CARBON
from pycarbon.sdk.Constants import LOCAL_FILE_PREFIX
class ArrowCarbonReader(object):
def __init__(self):
from jnius import autoclass
self.readerClass = autoclass('org.apache.carbondata.sdk.file.ArrowCarbonReader')
def builder(self, input_split):
self.input_split = input_split
self.ArrowCarbonReaderBuilder = self.readerClass.builder(input_split)
return self
def projection(self, projection_list):
self.ArrowCarbonReaderBuilder.projection(projection_list)
return self
def withHadoopConf(self, key, value):
if "fs.s3a.access.key" == key:
self.ak = value
elif "fs.s3a.secret.key" == key:
self.sk = value
elif "fs.s3a.endpoint" == key:
self.end_point = value
elif "fs.s3a.proxy.host" == key:
self.host = value
elif "fs.s3a.proxy.port" == key:
self.port = value
self.ArrowCarbonReaderBuilder.withHadoopConf(key, value)
return self
def build(self):
self.reader = self.ArrowCarbonReaderBuilder.buildArrowReader()
return self
def withFileLists(self, file_list):
self.ArrowCarbonReaderBuilder.withFileLists(file_list)
return self
def getSplits(self, is_blocklet_split):
from jnius import autoclass
java_list_class = autoclass('java.util.ArrayList')
if str(self.input_split).endswith(".manifest"):
if str(self.input_split).startswith(LOCAL_FILE_PREFIX):
self.manifest_path = str(self.input_split)[len(LOCAL_FILE_PREFIX):]
else:
self.manifest_path = self.input_split
from obs import ObsClient
if str(self.input_split).startswith("s3"):
obsClient = ObsClient(access_key_id=self.ak, secret_access_key=self.sk,
server=str(self.end_point).replace('http://', ''),
long_conn_mode=True)
sources = manifest.getSources(self.manifest_path, CARBON, obsClient)
self.file_path = sources[0]
else:
sources = manifest.getSources(self.manifest_path, CARBON)
java_list = java_list_class()
for source in sources:
java_list.add(source)
return self.ArrowCarbonReaderBuilder.withFileLists(java_list).getSplits(is_blocklet_split)
else:
return self.ArrowCarbonReaderBuilder.getSplits(is_blocklet_split)
def read(self, schema):
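    # readArrowBatchAddress returns the address of a native buffer whose
    # first 4 bytes hold the batch length (int32), followed by the Arrow IPC
    # stream itself; copy it into Python bytes, free the native memory, then
    # deserialize the record batches with pyarrow.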
address = self.reader.readArrowBatchAddress(schema)
size = (ctypes.c_int32).from_address(address).value
arrowData = (ctypes.c_byte * size).from_address(address + 4)
rawData = bytes(arrowData)
self.reader.freeArrowBatchMemory(address)
reader = pa.RecordBatchFileReader(pa.BufferReader(rawData))
data = reader.read_all()
return data
def close(self):
return self.reader.close()
| zzcclp/carbondata | python/pycarbon/sdk/ArrowCarbonReader.py | Python | apache-2.0 | 3,613 |
from yapsy.IPlugin import IPlugin
class IContainerCrawler(IPlugin):
"""
Crawler plugin interface
Subclasses of this class can be used to implement crawling functions
for different systems.
"""
def crawl(self, container_id):
"""
Crawling function that should return a list of features for
`container_id`. This function is called once for every container
at every crawling interval.
"""
raise NotImplementedError()
def get_feature(self):
"""
Returns the feature type as a string.
"""
raise NotImplementedError()
class IVMCrawler(IPlugin):
"""
Crawler plugin interface
Subclasses of this class can be used to implement crawling functions
for different systems.
"""
def crawl(self, vm_desc):
"""
Crawling function that should return a list of features for
`vm_desc`. This should change to 'vm_name' after auto kernel version
detection. This function is called once for every VM
at every crawling interval.
"""
raise NotImplementedError()
def get_feature(self):
"""
Returns the feature type as a string.
"""
raise NotImplementedError()
class IHostCrawler(IPlugin):
"""
Crawler plugin interface
Subclasses of this class can be used to implement crawling functions
for different host features (e.g. processes running in the host).
"""
def crawl(self):
"""
Crawling function that should return a list of features for the host.
This function is called once at every crawling interval.
"""
raise NotImplementedError()
def get_feature(self):
"""
Returns the feature type as a string.
"""
raise NotImplementedError()
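# --- Illustrative sketch (not part of the original interface) ---
# A minimal container crawler plugin might subclass IContainerCrawler as
# below. The feature name and the returned payload are hypothetical; real
# plugins are discovered by yapsy through their accompanying .plugin
# descriptor files and must follow the framework's expected output format.
class ExampleContainerCrawler(IContainerCrawler):
    def get_feature(self):
        # Hypothetical feature type reported by this plugin.
        return 'example'
    def crawl(self, container_id):
        # Return a list with a single dummy feature entry; the payload shape
        # here is purely illustrative.
        return [{'container_id': container_id, 'status': 'ok'}]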
| cloudviz/agentless-system-crawler | crawler/icrawl_plugin.py | Python | apache-2.0 | 1,847 |
#!/usr/bin/env python
import json
import os
from os.path import join, exists
import argparse
def main(args):
prefix = os.path.abspath(os.path.dirname(__file__))
# Let's read the opcode and type files
with open(join(prefix, '..', '..', 'core', 'codegen', 'types.json')) as f:
types = json.loads(f.read())
type_map = {}
for t in types[:-1]:
type_map[t['enum']] = {
'cpp' : t['cpp'],
'bhc' : t['bhc'],
'name' : t['union'],
'id' : t['id'],
'bhc_ary' : "bhc_ndarray_%s_p" % t['union']
}
# Let's generate the header and implementation of all data types
head = ""; impl = ""
head += "// Forward declaration of array types:\n"
for key, val in type_map.items():
head += "struct bhc_ndarray_%s;\n" % val['name']
head += "\n// Pointer shorthands:\n"
for key, val in type_map.items():
head += "typedef struct bhc_ndarray_%s* %s;\n" % (val['name'], val['bhc_ary'])
head += "\n// Type enum:\n"
head += "typedef enum {\n"
for key, val in type_map.items():
head += " %s = %d, \n" % (key, val['id'])
head += "} bhc_dtype; // Fits 5-bits\n"
impl += "// Array types:\n"
for key, val in type_map.items():
impl += "struct bhc_ndarray_%s {bhxx::BhArray<%s> me;};\n" % (val['name'], val['cpp'])
with open(join(prefix, '..', '..', 'core', 'codegen', 'opcodes.json')) as f:
opcodes = json.loads(f.read())
head += "\n// Opcodes enum:\n"
head += "typedef enum {\n"
for op in opcodes:
head += " %s = %s, \n" % (op['opcode'].replace("BH_", "BHC_"), op['id'])
head += "} bhc_opcode;\n"
# Let's add header and footer
head = """/* Bohrium C Bridge: data types. Auto generated! */
#ifndef BHC_TYPES_H
#define BHC_TYPES_H
#include <stdint.h>
typedef unsigned char bhc_bool;
typedef int8_t bhc_int8;
typedef int16_t bhc_int16;
typedef int32_t bhc_int32;
typedef int64_t bhc_int64;
typedef uint8_t bhc_uint8;
typedef uint16_t bhc_uint16;
typedef uint32_t bhc_uint32;
typedef uint64_t bhc_uint64;
typedef float bhc_float32;
typedef double bhc_float64;
typedef struct { bhc_float32 real, imag; } bhc_complex64;
typedef struct { bhc_float64 real, imag; } bhc_complex128;
typedef struct { bhc_uint64 start, key; } bhc_r123;
typedef union {
bhc_bool bool8;
bhc_int8 int8;
bhc_int16 int16;
bhc_int32 int32;
bhc_int64 int64;
bhc_uint8 uint8;
bhc_uint16 uint16;
bhc_uint32 uint32;
bhc_uint64 uint64;
bhc_float32 float32;
bhc_float64 float64;
bhc_complex64 complex64;
bhc_complex128 complex128;
bhc_r123 r123;
} bhc_scalar_union;
#ifdef __cplusplus
extern "C" {
#endif
%s
#ifdef __cplusplus
}
#endif
#endif /* !defined(BHC_TYPES_H) */
""" % head
impl = """/* Bohrium C Bridge: data types. Auto generated! */
#include <bhxx/bhxx.hpp>
#include "bhc.h"
%s
""" % impl
# Finally, let's write the files
with open(join(args.output, 'bhc_types.h'), 'w') as f:
f.write(head)
with open(join(args.output, 'bhc_types.cpp'), 'w') as f:
f.write(impl)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Generates the type source files for the Bohrium C bridge.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'output',
help='Path to the output directory.'
)
args = parser.parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
main(args)
| madsbk/bohrium | bridge/c/gen_types.py | Python | apache-2.0 | 3,684 |
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext
from django.views.decorators.http import require_POST
import accounts.payment_plans as plans
import analytics.query as analytics_query
import pinecast.email
from accounts.decorators import restrict_minimum_plan
from accounts.models import Network, UserSettings
from pinecast.helpers import get_object_or_404, populate_context, render, reverse, round_now
from podcasts.models import Podcast, PodcastEpisode
def get_network(req, id):
if req.user.is_staff:
return get_object_or_404(Network, deactivated=False, id=id)
else:
return get_object_or_404(Network, deactivated=False, id=id, members__in=[req.user])
@login_required
@restrict_minimum_plan(plans.PLAN_PRO)
def new_network(req):
uset = UserSettings.get_from_user(req.user)
if not req.POST:
return render(req, 'dashboard/network/page_new.html')
try:
net = Network(
name=req.POST.get('name'),
owner=req.user,
)
net.save()
net.members.add(req.user)
net.save()
except Exception as e:
return render(req,
'dashboard/network/page_new.html',
{'error': ugettext('Error while saving network details'),
'default': req.POST})
return redirect('network_dashboard', network_id=net.id)
@login_required
def network_dashboard(req, network_id):
net = get_network(req, network_id)
net_podcasts = net.podcast_set.all()
pod_map = {str(p.id): p for p in net_podcasts}
top_episodes_data = analytics_query.get_top_episodes([str(p.id) for p in net_podcasts])
top_episodes = []
top_episodes_listing = sorted(top_episodes_data.items(), key=lambda x: -1 * x[1])[:75]
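    # Bulk-fetch the episode rows for the top listing in a single query and
    # index them by id so the loop below avoids one database hit per episode.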
fetched_eps_map = {
str(ep.id): ep for
ep in
PodcastEpisode.objects.filter(id__in=[x for x, _ in top_episodes_listing])
}
for ep_id, count in top_episodes_listing:
episode = fetched_eps_map.get(ep_id)
if not episode:
continue
top_episodes.append({
'count': count,
'episode': episode,
'podcast': pod_map[str(episode.podcast_id)],
})
upcoming_episodes = PodcastEpisode.objects.filter(
podcast__in=net_podcasts,
publish__gt=round_now())
ctx = {
'error': req.GET.get('error'),
'network': net,
'net_podcasts': net_podcasts,
'net_podcasts_map': pod_map,
'top_episodes': top_episodes,
'upcoming_episodes': list(upcoming_episodes),
}
populate_context(req.user, ctx)
net_pod_ids = [x.id for x in net_podcasts]
ctx['net_pod_ids'] = net_pod_ids
ctx['has_pods_to_add'] = any(pod.id not in net_pod_ids for pod in ctx['podcasts'])
return render(req, 'dashboard/network/page_dash.html', ctx)
@require_POST
@login_required
def network_add_show(req, network_id):
net = get_network(req, network_id)
slug = req.POST.get('slug')
try:
pod = Podcast.objects.get(slug__iexact=slug)
except Podcast.DoesNotExist:
return redirect(reverse('network_dashboard', network_id=net.id) + '?error=aslg#shows,add-show')
else:
if pod.owner != req.user:
return redirect(reverse('network_dashboard', network_id=net.id) + '?error=nown#shows,add-show')
pod.networks.add(net)
pod.save()
return redirect(reverse('network_dashboard', network_id=net.id) + '#shows')
@require_POST
@login_required
def network_add_member(req, network_id):
net = get_network(req, network_id)
try:
user = User.objects.get(email__iexact=req.POST.get('email'))
except User.DoesNotExist:
return redirect(reverse('network_dashboard', network_id=network_id) + '?error=udne#members,add-member')
net.members.add(user)
net.save()
pinecast.email.send_notification_email(
user,
ugettext('[Pinecast] You have been added to "%s"') % net.name,
ugettext('''
We are emailing you to let you know that you were added to the network
"%s". No action is required on your part. If you log in to Pinecast,
you will now have read and write access to all of the podcasts in the
network, and will be able to add your own podcasts to the network.
''') % net.name
)
return redirect(reverse('network_dashboard', network_id=net.id) + '#members')
@require_POST
@login_required
def network_edit(req, network_id):
net = get_network(req, network_id)
try:
net.name = req.POST.get('name')
net.save()
except Exception as e:
# TODO: maybe handle this better?
pass
return redirect('network_dashboard', network_id=net.id)
@require_POST
@login_required
def network_deactivate(req, network_id):
net = get_object_or_404(Network, deactivated=False, id=network_id, owner=req.user)
if req.POST.get('confirm') != 'doit':
return redirect('dashboard')
net.deactivated = True
net.save()
return redirect('dashboard')
@require_POST
@login_required
def network_remove_podcast(req, network_id, podcast_slug):
net = get_network(req, network_id)
pod = get_object_or_404(Podcast, slug=podcast_slug, networks__in=[net])
# We don't need to confirm if the user is the owner.
if pod.owner == req.user:
pod.networks.remove(net)
pod.save()
return redirect('network_dashboard', network_id=net.id)
if req.user != net.owner:
raise Http404()
pod.networks.remove(net)
pod.save()
return redirect('network_dashboard', network_id=net.id)
@require_POST
@login_required
def network_remove_member(req, network_id, member_id):
net = get_network(req, network_id)
user = get_object_or_404(User, id=member_id)
if not net.members.filter(username=user.username).exists():
raise Http404()
# We don't need to confirm if the user is the owner.
if net.owner == user:
return redirect('network_dashboard', network_id=net.id)
pods = Podcast.objects.filter(owner=user, networks__in=[net])
for pod in pods:
pod.networks.remove(net)
pod.save()
net.members.remove(user)
net.save()
return redirect(reverse('network_dashboard', network_id=net.id) + '#members')
| Pinecast/pinecast | dashboard/views_network.py | Python | apache-2.0 | 6,500 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pytest
from tvm.driver.tvmc import TVMCException
from tvm.driver.tvmc.registry import generate_registry_args, reconstruct_registry_entity
from tvm.relay.backend import Executor
def test_registry_to_argparse():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=aot", "--executor-aot-interface-api=c"])
assert parsed.executor == "aot"
assert parsed.executor_aot_interface_api == "c"
def test_registry_to_argparse_default():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor, "aot")
parsed, _ = parser.parse_known_args([])
assert parsed.executor == "aot"
def test_mapping_registered_args():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=aot", "--executor-aot-interface-api=c"])
entity = reconstruct_registry_entity(parsed, Executor)
assert isinstance(entity, Executor)
assert "interface-api" in entity
assert entity["interface-api"] == "c"
def test_mapping_registered_args_no_match_for_name():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=woof"])
with pytest.raises(TVMCException, match='Executor "woof" is not defined'):
reconstruct_registry_entity(parsed, Executor)
def test_mapping_registered_args_no_arg():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args([])
assert reconstruct_registry_entity(parsed, Executor) == None
def test_mapping_registered_args_mismatch_for_arg():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=aot", "--executor-graph-link-params=1"])
with pytest.raises(
TVMCException,
match="Passed --executor-graph-link-params but did not specify graph executor",
):
reconstruct_registry_entity(parsed, Executor)
| Laurawly/tvm-1 | tests/python/driver/tvmc/test_registry_options.py | Python | apache-2.0 | 2,904 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common import log as logging
from st2common.services.rules import get_rules_given_trigger
from st2common.services.triggers import get_trigger_db_by_ref
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
from st2common.metrics.base import get_driver
LOG = logging.getLogger("st2reactor.rules.RulesEngine")
__all__ = ["RulesEngine"]
class RulesEngine(object):
def handle_trigger_instance(self, trigger_instance):
# Find matching rules for trigger instance.
matching_rules = self.get_matching_rules_for_trigger(trigger_instance)
if matching_rules:
# Create rule enforcers.
enforcers = self.create_rule_enforcers(trigger_instance, matching_rules)
# Enforce the rules.
self.enforce_rules(enforcers)
else:
LOG.info(
"No matching rules found for trigger instance %s.",
trigger_instance["id"],
)
def get_matching_rules_for_trigger(self, trigger_instance):
trigger = trigger_instance.trigger
trigger_db = get_trigger_db_by_ref(trigger_instance.trigger)
if not trigger_db:
LOG.error(
"No matching trigger found in db for trigger instance %s.",
trigger_instance,
)
return None
rules = get_rules_given_trigger(trigger=trigger)
LOG.info(
"Found %d rules defined for trigger %s",
len(rules),
trigger_db.get_reference().ref,
)
if len(rules) < 1:
return rules
matcher = RulesMatcher(
trigger_instance=trigger_instance, trigger=trigger_db, rules=rules
)
matching_rules = matcher.get_matching_rules()
LOG.info(
"Matched %s rule(s) for trigger_instance %s (trigger=%s)",
len(matching_rules),
trigger_instance["id"],
trigger_db.ref,
)
return matching_rules
def create_rule_enforcers(self, trigger_instance, matching_rules):
"""
        Create a RuleEnforcer for each matching rule.
        This method is specific to a single trigger_instance; therefore, if
        creating one RuleEnforcer fails, it is likely that all will be broken.
"""
metrics_driver = get_driver()
enforcers = []
for matching_rule in matching_rules:
metrics_driver.inc_counter("rule.matched")
metrics_driver.inc_counter("rule.%s.matched" % (matching_rule.ref))
enforcers.append(RuleEnforcer(trigger_instance, matching_rule))
return enforcers
def enforce_rules(self, enforcers):
for enforcer in enforcers:
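            # Isolate failures: an exception from one rule enforcement is
            # logged and must not prevent the remaining enforcers from running.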
try:
enforcer.enforce() # Should this happen in an eventlet pool?
except:
LOG.exception("Exception enforcing rule %s.", enforcer.rule)
| nzlosh/st2 | st2reactor/st2reactor/rules/engine.py | Python | apache-2.0 | 3,603 |
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Copy bags between disks.
"""
from __future__ import print_function
import argparse
import glob
import os
import shutil
import sys
from rosbag.bag import Bag
import psutil
from modules.canbus.proto.chassis_pb2 import Chassis
K_CHASSIS_TOPIC = '/apollo/canbus/chassis'
K_DRIVE_EVENT_TOPIC = '/apollo/drive_event'
K_LARGE_SIZE_TOPICS = set([
'/apollo/sensor/camera/obstacle/front_6mm',
'/apollo/sensor/camera/traffic/image_long',
'/apollo/sensor/camera/traffic/image_short',
'/apollo/sensor/velodyne64/compensator/PointCloud2',
])
K_COPY_LARGE_SIZE_TOPICS_SECONDS_BEFORE_EVENT = 20.0
def GetDisks():
"""Get disks, which should be mounted under /media."""
disks = [disk.mountpoint for disk in psutil.disk_partitions()
if disk.mountpoint.startswith('/media/')]
disks.append('/apollo')
# The disks are sorted like
# /media/apollo/internal_nvme
# /media/apollo/apollo8
# /apollo
disks = sorted(disks, reverse=True)
if len(disks) <= 1:
print('Cannot find disks.')
sys.exit(1)
copy_from = None
copy_to = None
for index, disk in enumerate(disks):
print('\t{}: {}'.format(index, disk))
try:
selected = int(input('Which disk do you want to copy from: '))
copy_from = disks[selected]
except:
print('Bad input')
sys.exit(1)
for index, disk in enumerate(disks):
if index != selected:
print('\t{}: {}'.format(index, disk))
try:
selected = int(input('Which disk do you want to copy to: '))
copy_to = disks[selected]
except:
print('Bad input')
sys.exit(1)
if copy_from and copy_to and (copy_from != copy_to):
print('Copy disk: {} -> {}'.format(copy_from, copy_to))
else:
sys.exit(1)
return copy_from, copy_to
def CollectEvents(bags):
"""Collect interested event timestamps."""
print('Collecting events...', end='')
events = []
cur_driving_mode = None
for bag_file in bags:
with Bag(bag_file, 'r') as bag:
for topic, msg, t in bag.read_messages(topics=[K_CHASSIS_TOPIC,
K_DRIVE_EVENT_TOPIC]):
# For disengagement, take the message time as event time.
if topic == K_CHASSIS_TOPIC:
if (cur_driving_mode == Chassis.COMPLETE_AUTO_DRIVE and
msg.driving_mode == Chassis.EMERGENCY_MODE):
events.append(t.to_sec())
cur_driving_mode = msg.driving_mode
# For DriveEvent, take the header time as event time.
elif topic == K_DRIVE_EVENT_TOPIC:
events.append(msg.header.timestamp_sec)
print('Collected {} events.'.format(len(events)))
return events
def SmartCopyBags(from_dir, to_dir):
"""Copy a task but filter useless sensor data."""
bags = sorted(glob.glob(os.path.join(from_dir, '*.bag')))
if len(bags) == 0:
return
if not os.path.exists(to_dir):
os.makedirs(to_dir)
events = CollectEvents(bags)
next_event = 0
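    # CollectEvents walks the bags in chronological order, so 'events' is
    # sorted; 'next_event' is a monotonically advancing cursor, letting each
    # message be compared only against the next upcoming event timestamp.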
for from_bag in bags:
to_bag = os.path.join(to_dir, os.path.basename(from_bag))
print('Copy bag: {} -> {}'.format(from_bag, to_bag))
# Do the copy
with Bag(from_bag, 'r') as bag_in, Bag(to_bag, 'w') as bag_out:
for topic, msg, t in bag_in.read_messages():
# For small size topics, always copy.
if topic not in K_LARGE_SIZE_TOPICS:
bag_out.write(topic, msg, t)
continue
msg_sec = t.to_sec()
while next_event < len(events) and events[next_event] < msg_sec:
next_event += 1
# For large size topics, only copy when it's near an event.
if (next_event < len(events) and events[next_event] - msg_sec <
K_COPY_LARGE_SIZE_TOPICS_SECONDS_BEFORE_EVENT):
bag_out.write(topic, msg, t)
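# Illustrative sketch (not part of the original script): the "near an event"
# filter used above, written as a standalone predicate. A large-size message
# at time msg_sec is kept only if some event happens at or after it within
# K_COPY_LARGE_SIZE_TOPICS_SECONDS_BEFORE_EVENT seconds.
def _is_near_event_example(events, msg_sec):
    """Return True if msg_sec falls shortly before any event timestamp."""
    return any(
        0 <= event_sec - msg_sec < K_COPY_LARGE_SIZE_TOPICS_SECONDS_BEFORE_EVENT
        for event_sec in events)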
def SmartCopyDir(from_dir, to_dir):
"""Copy directory."""
print('Copy dir: {} -> {}'.format(from_dir, to_dir))
is_task_dir = False
for f in sorted(os.listdir(from_dir), reverse=True):
sub_path = os.path.join(from_dir, f)
if os.path.isdir(sub_path):
SmartCopyDir(sub_path, os.path.join(to_dir, f))
continue
if f.endswith('.bag.active'):
# Found unindexed bag, always copy it.
# TODO(xiaoxq): Index the bag and go to next step.
shutil.copy(sub_path, to_dir)
continue
if f.endswith('.bag'):
is_task_dir = True
            break
if is_task_dir:
SmartCopyBags(from_dir, to_dir)
def main():
"""Do the job."""
copy_from, copy_to = GetDisks()
from_dir = os.path.join(copy_from, 'data/bag')
if not os.path.exists(from_dir):
print('Bag dir doesn\'t exist:', from_dir)
sys.exit(1)
to_dir = os.path.join(copy_to, 'data/bag')
print('\t1. Only keep sensor data for drive events')
    print('\t2. Everything')
selected = input('What kind of data do you need: ')
if selected == '1':
SmartCopyDir(from_dir, to_dir)
elif selected == '2':
shutil.copytree(from_dir, to_dir)
else:
print('Bad input')
sys.exit(1)
    # TODO(xiaoxq): We always try to keep data structured, such as putting
    # it into rosbags, instead of copying raw files around.
other_data_dirs = {'/apollo/data/gpsbin': 'data/gpsbin'}
    for src, dst in other_data_dirs.items():
if os.path.exists(src):
print('Copying ', src)
shutil.copytree(src, os.path.join(copy_to, dst))
if __name__ == "__main__":
main()
| ycool/apollo | modules/tools/rosbag/copy_bags_between_disks.py | Python | apache-2.0 | 6,623 |
"""SmartApp functionality to receive cloud-push notifications."""
import asyncio
import functools
import logging
import secrets
from urllib.parse import urlparse
from uuid import uuid4
from aiohttp import web
from pysmartapp import Dispatcher, SmartAppManager
from pysmartapp.const import SETTINGS_APP_ID
from pysmartthings import (
APP_TYPE_WEBHOOK,
CAPABILITIES,
CLASSIFICATION_AUTOMATION,
App,
AppOAuth,
AppSettings,
InstalledAppStatus,
SmartThings,
SourceType,
Subscription,
SubscriptionEntity,
)
from homeassistant.components import webhook
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.network import NoURLAvailableError, get_url
from .const import (
APP_NAME_PREFIX,
APP_OAUTH_CLIENT_NAME,
APP_OAUTH_SCOPES,
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_INSTANCE_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DATA_MANAGER,
DOMAIN,
IGNORED_CAPABILITIES,
SETTINGS_INSTANCE_ID,
SIGNAL_SMARTAPP_PREFIX,
STORAGE_KEY,
STORAGE_VERSION,
SUBSCRIPTION_WARNING_LIMIT,
)
_LOGGER = logging.getLogger(__name__)
def format_unique_id(app_id: str, location_id: str) -> str:
"""Format the unique id for a config entry."""
return f"{app_id}_{location_id}"
async def find_app(hass: HomeAssistant, api):
"""Find an existing SmartApp for this installation of hass."""
apps = await api.apps()
for app in [app for app in apps if app.app_name.startswith(APP_NAME_PREFIX)]:
# Load settings to compare instance id
settings = await app.settings()
if (
settings.settings.get(SETTINGS_INSTANCE_ID)
== hass.data[DOMAIN][CONF_INSTANCE_ID]
):
return app
async def validate_installed_app(api, installed_app_id: str):
"""
Ensure the specified installed SmartApp is valid and functioning.
Query the API for the installed SmartApp and validate that it is tied to
the specified app_id and is in an authorized state.
"""
installed_app = await api.installed_app(installed_app_id)
if installed_app.installed_app_status != InstalledAppStatus.AUTHORIZED:
raise RuntimeWarning(
"Installed SmartApp instance '{}' ({}) is not AUTHORIZED but instead {}".format(
installed_app.display_name,
installed_app.installed_app_id,
installed_app.installed_app_status,
)
)
return installed_app
def validate_webhook_requirements(hass: HomeAssistant) -> bool:
"""Ensure Home Assistant is setup properly to receive webhooks."""
if hass.components.cloud.async_active_subscription():
return True
if hass.data[DOMAIN][CONF_CLOUDHOOK_URL] is not None:
return True
return get_webhook_url(hass).lower().startswith("https://")
def get_webhook_url(hass: HomeAssistant) -> str:
"""
Get the URL of the webhook.
Return the cloudhook if available, otherwise local webhook.
"""
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if hass.components.cloud.async_active_subscription() and cloudhook_url is not None:
return cloudhook_url
return webhook.async_generate_url(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
def _get_app_template(hass: HomeAssistant):
try:
endpoint = f"at {get_url(hass, allow_cloud=False, prefer_external=True)}"
except NoURLAvailableError:
endpoint = ""
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if cloudhook_url is not None:
endpoint = "via Nabu Casa"
description = f"{hass.config.location_name} {endpoint}"
return {
"app_name": APP_NAME_PREFIX + str(uuid4()),
"display_name": "Home Assistant",
"description": description,
"webhook_target_url": get_webhook_url(hass),
"app_type": APP_TYPE_WEBHOOK,
"single_instance": True,
"classifications": [CLASSIFICATION_AUTOMATION],
}
async def create_app(hass: HomeAssistant, api):
"""Create a SmartApp for this instance of hass."""
# Create app from template attributes
template = _get_app_template(hass)
app = App()
for key, value in template.items():
setattr(app, key, value)
app, client = await api.create_app(app)
_LOGGER.debug("Created SmartApp '%s' (%s)", app.app_name, app.app_id)
# Set unique hass id in settings
settings = AppSettings(app.app_id)
settings.settings[SETTINGS_APP_ID] = app.app_id
settings.settings[SETTINGS_INSTANCE_ID] = hass.data[DOMAIN][CONF_INSTANCE_ID]
await api.update_app_settings(settings)
_LOGGER.debug(
"Updated App Settings for SmartApp '%s' (%s)", app.app_name, app.app_id
)
# Set oauth scopes
oauth = AppOAuth(app.app_id)
oauth.client_name = APP_OAUTH_CLIENT_NAME
oauth.scope.extend(APP_OAUTH_SCOPES)
await api.update_app_oauth(oauth)
_LOGGER.debug("Updated App OAuth for SmartApp '%s' (%s)", app.app_name, app.app_id)
return app, client
async def update_app(hass: HomeAssistant, app):
"""Ensure the SmartApp is up-to-date and update if necessary."""
template = _get_app_template(hass)
template.pop("app_name") # don't update this
update_required = False
for key, value in template.items():
if getattr(app, key) != value:
update_required = True
setattr(app, key, value)
if update_required:
await app.save()
_LOGGER.debug(
"SmartApp '%s' (%s) updated with latest settings", app.app_name, app.app_id
)
def setup_smartapp(hass, app):
"""
Configure an individual SmartApp in hass.
Register the SmartApp with the SmartAppManager so that hass will service
lifecycle events (install, event, etc...). A unique SmartApp is created
for each SmartThings account that is configured in hass.
"""
manager = hass.data[DOMAIN][DATA_MANAGER]
if smartapp := manager.smartapps.get(app.app_id):
# already setup
return smartapp
smartapp = manager.register(app.app_id, app.webhook_public_key)
smartapp.name = app.display_name
smartapp.description = app.description
smartapp.permissions.extend(APP_OAUTH_SCOPES)
return smartapp
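# Illustrative sketch (not part of the original module): an assumed order of
# operations a caller (e.g. the config flow) might follow to make sure a
# SmartApp exists, is current, and is registered with the manager. Only
# helpers defined in this module are used; `api` is assumed to be an
# authenticated pysmartthings SmartThings instance.
async def _example_ensure_smartapp(hass: HomeAssistant, api):
    """Find or create this instance's SmartApp and register it (sketch)."""
    await setup_smartapp_endpoint(hass)
    app = await find_app(hass, api)
    if app:
        await update_app(hass, app)
    else:
        app, _client = await create_app(hass, api)
    return setup_smartapp(hass, app)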
async def setup_smartapp_endpoint(hass: HomeAssistant):
"""
Configure the SmartApp webhook in hass.
SmartApps are an extension point within the SmartThings ecosystem and
    are used to receive push updates (i.e. device updates) from the cloud.
"""
if hass.data.get(DOMAIN):
# already setup
return
# Get/create config to store a unique id for this hass instance.
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
if not (config := await store.async_load()):
# Create config
config = {
CONF_INSTANCE_ID: str(uuid4()),
CONF_WEBHOOK_ID: secrets.token_hex(),
CONF_CLOUDHOOK_URL: None,
}
await store.async_save(config)
# Register webhook
webhook.async_register(
hass, DOMAIN, "SmartApp", config[CONF_WEBHOOK_ID], smartapp_webhook
)
# Create webhook if eligible
cloudhook_url = config.get(CONF_CLOUDHOOK_URL)
if (
cloudhook_url is None
and hass.components.cloud.async_active_subscription()
and not hass.config_entries.async_entries(DOMAIN)
):
cloudhook_url = await hass.components.cloud.async_create_cloudhook(
config[CONF_WEBHOOK_ID]
)
config[CONF_CLOUDHOOK_URL] = cloudhook_url
await store.async_save(config)
_LOGGER.debug("Created cloudhook '%s'", cloudhook_url)
# SmartAppManager uses a dispatcher to invoke callbacks when push events
# occur. Use hass' implementation instead of the built-in one.
dispatcher = Dispatcher(
signal_prefix=SIGNAL_SMARTAPP_PREFIX,
connect=functools.partial(async_dispatcher_connect, hass),
send=functools.partial(async_dispatcher_send, hass),
)
# Path is used in digital signature validation
path = (
urlparse(cloudhook_url).path
if cloudhook_url
else webhook.async_generate_path(config[CONF_WEBHOOK_ID])
)
manager = SmartAppManager(path, dispatcher=dispatcher)
manager.connect_install(functools.partial(smartapp_install, hass))
manager.connect_update(functools.partial(smartapp_update, hass))
manager.connect_uninstall(functools.partial(smartapp_uninstall, hass))
hass.data[DOMAIN] = {
DATA_MANAGER: manager,
CONF_INSTANCE_ID: config[CONF_INSTANCE_ID],
DATA_BROKERS: {},
CONF_WEBHOOK_ID: config[CONF_WEBHOOK_ID],
# Will not be present if not enabled
CONF_CLOUDHOOK_URL: config.get(CONF_CLOUDHOOK_URL),
}
_LOGGER.debug(
"Setup endpoint for %s",
cloudhook_url
if cloudhook_url
else webhook.async_generate_url(hass, config[CONF_WEBHOOK_ID]),
)
async def unload_smartapp_endpoint(hass: HomeAssistant):
"""Tear down the component configuration."""
if DOMAIN not in hass.data:
return
# Remove the cloudhook if it was created
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if cloudhook_url and hass.components.cloud.async_is_logged_in():
await hass.components.cloud.async_delete_cloudhook(
hass.data[DOMAIN][CONF_WEBHOOK_ID]
)
# Remove cloudhook from storage
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
await store.async_save(
{
CONF_INSTANCE_ID: hass.data[DOMAIN][CONF_INSTANCE_ID],
CONF_WEBHOOK_ID: hass.data[DOMAIN][CONF_WEBHOOK_ID],
CONF_CLOUDHOOK_URL: None,
}
)
_LOGGER.debug("Cloudhook '%s' was removed", cloudhook_url)
# Remove the webhook
webhook.async_unregister(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
# Disconnect all brokers
for broker in hass.data[DOMAIN][DATA_BROKERS].values():
broker.disconnect()
# Remove all handlers from manager
hass.data[DOMAIN][DATA_MANAGER].dispatcher.disconnect_all()
# Remove the component data
hass.data.pop(DOMAIN)
async def smartapp_sync_subscriptions(
hass: HomeAssistant,
auth_token: str,
location_id: str,
installed_app_id: str,
devices,
):
"""Synchronize subscriptions of an installed up."""
api = SmartThings(async_get_clientsession(hass), auth_token)
tasks = []
async def create_subscription(target: str):
sub = Subscription()
sub.installed_app_id = installed_app_id
sub.location_id = location_id
sub.source_type = SourceType.CAPABILITY
sub.capability = target
try:
await api.create_subscription(sub)
_LOGGER.debug(
"Created subscription for '%s' under app '%s'", target, installed_app_id
)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error(
"Failed to create subscription for '%s' under app '%s': %s",
target,
installed_app_id,
error,
)
async def delete_subscription(sub: SubscriptionEntity):
try:
await api.delete_subscription(installed_app_id, sub.subscription_id)
_LOGGER.debug(
"Removed subscription for '%s' under app '%s' because it was no longer needed",
sub.capability,
installed_app_id,
)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error(
"Failed to remove subscription for '%s' under app '%s': %s",
sub.capability,
installed_app_id,
error,
)
# Build set of capabilities and prune unsupported ones
capabilities = set()
for device in devices:
capabilities.update(device.capabilities)
# Remove items not defined in the library
capabilities.intersection_update(CAPABILITIES)
# Remove unused capabilities
capabilities.difference_update(IGNORED_CAPABILITIES)
capability_count = len(capabilities)
if capability_count > SUBSCRIPTION_WARNING_LIMIT:
_LOGGER.warning(
"Some device attributes may not receive push updates and there may be subscription "
"creation failures under app '%s' because %s subscriptions are required but "
"there is a limit of %s per app",
installed_app_id,
capability_count,
SUBSCRIPTION_WARNING_LIMIT,
)
_LOGGER.debug(
"Synchronizing subscriptions for %s capabilities under app '%s': %s",
capability_count,
installed_app_id,
capabilities,
)
# Get current subscriptions and find differences
subscriptions = await api.subscriptions(installed_app_id)
for subscription in subscriptions:
if subscription.capability in capabilities:
capabilities.remove(subscription.capability)
else:
# Delete the subscription
tasks.append(delete_subscription(subscription))
# Remaining capabilities need subscriptions created
tasks.extend([create_subscription(c) for c in capabilities])
if tasks:
await asyncio.gather(*tasks)
else:
_LOGGER.debug("Subscriptions for app '%s' are up-to-date", installed_app_id)
async def _continue_flow(
hass: HomeAssistant,
app_id: str,
location_id: str,
installed_app_id: str,
refresh_token: str,
):
"""Continue a config flow if one is in progress for the specific installed app."""
unique_id = format_unique_id(app_id, location_id)
flow = next(
(
flow
for flow in hass.config_entries.flow.async_progress_by_handler(DOMAIN)
if flow["context"]["unique_id"] == unique_id
),
None,
)
if flow is not None:
await hass.config_entries.flow.async_configure(
flow["flow_id"],
{
CONF_INSTALLED_APP_ID: installed_app_id,
CONF_REFRESH_TOKEN: refresh_token,
},
)
_LOGGER.debug(
"Continued config flow '%s' for SmartApp '%s' under parent app '%s'",
flow["flow_id"],
installed_app_id,
app_id,
)
async def smartapp_install(hass: HomeAssistant, req, resp, app):
"""Handle a SmartApp installation and continue the config flow."""
await _continue_flow(
hass, app.app_id, req.location_id, req.installed_app_id, req.refresh_token
)
_LOGGER.debug(
"Installed SmartApp '%s' under parent app '%s'",
req.installed_app_id,
app.app_id,
)
async def smartapp_update(hass: HomeAssistant, req, resp, app):
"""Handle a SmartApp update and either update the entry or continue the flow."""
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
),
None,
)
if entry:
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_REFRESH_TOKEN: req.refresh_token}
)
_LOGGER.debug(
"Updated config entry '%s' for SmartApp '%s' under parent app '%s'",
entry.entry_id,
req.installed_app_id,
app.app_id,
)
await _continue_flow(
hass, app.app_id, req.location_id, req.installed_app_id, req.refresh_token
)
_LOGGER.debug(
"Updated SmartApp '%s' under parent app '%s'", req.installed_app_id, app.app_id
)
async def smartapp_uninstall(hass: HomeAssistant, req, resp, app):
"""
Handle when a SmartApp is removed from a location by the user.
Find and delete the config entry representing the integration.
"""
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
),
None,
)
if entry:
        # Adding this as a job is not needed because the current coroutine
        # was invoked from the dispatcher and is not being awaited.
await hass.config_entries.async_remove(entry.entry_id)
_LOGGER.debug(
"Uninstalled SmartApp '%s' under parent app '%s'",
req.installed_app_id,
app.app_id,
)
async def smartapp_webhook(hass: HomeAssistant, webhook_id: str, request):
"""
Handle a smartapp lifecycle event callback from SmartThings.
Requests from SmartThings are digitally signed and the SmartAppManager
validates the signature for authenticity.
"""
manager = hass.data[DOMAIN][DATA_MANAGER]
data = await request.json()
result = await manager.handle_request(data, request.headers)
return web.json_response(result)
| jawilson/home-assistant | homeassistant/components/smartthings/smartapp.py | Python | apache-2.0 | 17,223 |
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from nose.tools import * # noqa: F403
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
AuthUserFactory,
PreprintFactory
)
from osf.utils.permissions import WRITE
from osf.utils.workflows import DefaultStates
from addons.osfstorage.models import OsfStorageFile
class TestPreprintProvidersList(ApiTestCase):
def setUp(self):
super(TestPreprintProvidersList, self).setUp()
self.user = AuthUserFactory()
self.preprint = PreprintFactory(creator=self.user)
self.url = '/{}preprints/{}/files/'.format(API_BASE, self.preprint._id)
self.user_two = AuthUserFactory()
def test_published_preprint_files(self):
# Unauthenticated
res = self.app.get(self.url)
assert res.status_code == 200
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_unpublished_preprint_files(self):
self.preprint.is_published = False
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_private_preprint_files(self):
self.preprint.is_public = False
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_abandoned_preprint_files(self):
self.preprint.machine_state = DefaultStates.INITIAL.value
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_orphaned_preprint_files(self):
self.preprint.primary_file = None
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_deleted_preprint_files(self):
self.preprint.deleted = timezone.now()
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 404
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 404
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 404
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth, expect_errors=True)
assert res.status_code == 404
def test_withdrawn_preprint_files(self):
self.preprint.date_withdrawn = timezone.now()
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Admin contrib
res = self.app.get(self.url,
auth=self.user.auth, expect_errors=True)
assert res.status_code == 403
def test_return_published_files_logged_out(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
def test_does_not_return_storage_addons_link(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_not_in('storage_addons', res.json['data'][0]['links'])
def test_does_not_return_new_folder_link(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_not_in('new_folder', res.json['data'][0]['links'])
def test_returns_provider_data(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(isinstance(res.json['data'], list))
assert_equal(res.content_type, 'application/vnd.api+json')
data = res.json['data'][0]
assert_equal(data['attributes']['kind'], 'folder')
assert_equal(data['attributes']['name'], 'osfstorage')
assert_equal(data['attributes']['provider'], 'osfstorage')
assert_equal(data['attributes']['preprint'], self.preprint._id)
assert_equal(data['attributes']['path'], '/')
assert_equal(data['attributes']['node'], None)
def test_osfstorage_file_data_not_found(self):
res = self.app.get(
'{}osfstorage/{}'.format(self.url, self.preprint.primary_file._id), auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_returns_osfstorage_folder_version_two(self):
res = self.app.get(
'{}osfstorage/'.format(self.url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_returns_osf_storage_folder_version_two_point_two(self):
res = self.app.get(
'{}osfstorage/?version=2.2'.format(self.url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_osfstorage_folder_data_not_found(self):
fobj = self.preprint.root_folder.append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/{}'.format(self.url, fobj._id), auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
class TestPreprintFilesList(ApiTestCase):
def setUp(self):
super(TestPreprintFilesList, self).setUp()
self.user = AuthUserFactory()
self.preprint = PreprintFactory(creator=self.user)
self.url = '/{}preprints/{}/files/osfstorage/'.format(API_BASE, self.preprint._id)
self.user_two = AuthUserFactory()
def test_published_preprint_files(self):
# Unauthenticated
res = self.app.get(self.url)
assert res.status_code == 200
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_unpublished_preprint_files(self):
self.preprint.is_published = False
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_private_preprint_files(self):
self.preprint.is_public = False
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_abandoned_preprint_files(self):
self.preprint.machine_state = DefaultStates.INITIAL.value
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_orphaned_preprint_files(self):
self.preprint.primary_file = None
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth)
assert res.status_code == 200
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
def test_deleted_preprint_files(self):
self.preprint.deleted = timezone.now()
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 404
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 404
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 404
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth, expect_errors=True)
assert res.status_code == 404
def test_withdrawn_preprint_files(self):
self.preprint.date_withdrawn = timezone.now()
self.preprint.save()
# Unauthenticated
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
self.preprint.add_contributor(self.user_two, WRITE, save=True)
res = self.app.get(self.url, auth=self.user_two.auth, expect_errors=True)
assert res.status_code == 403
# Admin contrib
res = self.app.get(self.url, auth=self.user.auth, expect_errors=True)
assert res.status_code == 403
def test_not_just_primary_file_returned(self):
filename = 'my second file'
second_file = OsfStorageFile.create(
target_object_id=self.preprint.id,
target_content_type=ContentType.objects.get_for_model(self.preprint),
path='/{}'.format(filename),
name=filename,
materialized_path='/{}'.format(filename))
second_file.save()
from addons.osfstorage import settings as osfstorage_settings
second_file.create_version(self.user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
second_file.parent = self.preprint.root_folder
second_file.save()
assert len(self.preprint.files.all()) == 2
res = self.app.get(self.url, auth=self.user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 2
assert data[0]['id'] == self.preprint.primary_file._id
def test_nested_file_as_primary_file_is_returned(self):
# Primary file can be any file nested somewhere under the preprint's root folder.
subfolder = self.preprint.root_folder.append_folder('subfolder')
subfolder.save()
primary_file = self.preprint.primary_file
primary_file.move_under(subfolder)
primary_file.save()
assert_equal(subfolder.children[0], primary_file)
assert_equal(primary_file.parent, subfolder)
res = self.app.get(self.url, auth=self.user.auth)
assert len(res.json['data']) == 1
data = res.json['data'][0]
assert data['id'] == subfolder._id
assert data['attributes']['kind'] == 'folder'
assert data['attributes']['path'] == '/{}/'.format(subfolder._id)
assert data['attributes']['materialized_path'] == '/{}/'.format(subfolder.name)
def test_cannot_access_other_addons(self):
url = '/{}preprints/{}/files/github/'.format(API_BASE, self.preprint._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert res.status_code == 404
| mattclark/osf.io | api_tests/preprints/views/test_preprint_files_list.py | Python | apache-2.0 | 15,540 |
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
#
# Traffic Control documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 20 13:17:23 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, sphinx
from sphinx.errors import VersionRequirementError
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.4.3'
# Compare version tuples rather than raw strings so e.g. '1.10.0' sorts after '1.4.3'.
if tuple(int(part) for part in needs_sphinx.split('.')) > sphinx.version_info[:3]:
    message = 'This project needs at least Sphinx v%s' % needs_sphinx
    raise VersionRequirementError(message)
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.imgmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Traffic Control'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '2_19_15 11:44'
# The full version, including alpha/beta/rc tags.
release = '2.1-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../../traffic_ops/app/public/images/tc_logo_c_only.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
html_favicon = '../../traffic_ops/app/public/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Traffic Control Doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'TrafficControl.tex', u'Traffic Control Documentation', 'Apache Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'traffic control', u'Traffic Control Documentation', 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Traffic Control', u'Traffic Control Documentation',
u'Apache Software Foundation', 'Traffic Control', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Traffic Control'
epub_author = u'Apache Software Foundation'
epub_publisher = u'Apache Software Foundation'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def setup(app):
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to _static
| mdb/incubator-trafficcontrol | docs/source/conf.py | Python | apache-2.0 | 10,587 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING
import pandas as pd
from pandas.api.types import CategoricalDtype
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class CategoricalAccessor(object):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.dtype, CategoricalDtype):
raise ValueError("Cannot call CategoricalAccessor on type {}".format(series.dtype))
self._data = series
@property
def categories(self) -> pd.Index:
"""
The categories of this categorical.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
"""
return self._data.dtype.categories
@categories.setter
def categories(self, categories: pd.Index) -> None:
raise NotImplementedError()
@property
def ordered(self) -> bool:
"""
Whether the categories have an ordered relationship.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.ordered
False
"""
return self._data.dtype.ordered
@property
def codes(self) -> "ps.Series":
"""
Return Series of codes as well as the index.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
return self._data._with_new_scol(self._data.spark.column).rename()
def add_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_ordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_unordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_categories(self, removals: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_unused_categories(self) -> "ps.Series":
raise NotImplementedError()
def rename_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def reorder_categories(
self, new_categories: pd.Index, ordered: bool = None, inplace: bool = False
) -> "ps.Series":
raise NotImplementedError()
def set_categories(
self,
new_categories: pd.Index,
ordered: bool = None,
rename: bool = False,
inplace: bool = False,
) -> "ps.Series":
raise NotImplementedError()
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.categorical
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.categorical.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.categorical tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.categorical,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| maropu/spark | python/pyspark/pandas/categorical.py | Python | apache-2.0 | 5,290 |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging
from neutron._i18n import _LE
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron import manager
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk.rpc import constants
LOG = logging.getLogger(__name__)
# This module contains stub (client-side) and skeleton (server-side)
# proxy code that executes in the Neutron server process space. This
# is needed if any of the trunk service plugin drivers has a remote
# component (e.g. agent), that needs to communicate with the Neutron
# Server.
# The Server side exposes the following remote methods:
#
# - lookup method to retrieve trunk details: used by the agent to learn
# about the trunk.
# - update methods for trunk and its subports: used by the agent to
# inform the server about local trunk status changes.
#
# For agent-side stub and skeleton proxy code, please look at agent.py
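# Illustrative sketch (not part of the original module): a hypothetical
# server-side helper showing how the TrunkStub defined below could be used to
# push a trunk lifecycle notification to agents; when and by whom it would be
# called is assumed for illustration only.
def _example_notify_trunk_created(context, trunk):
    """Push a freshly created trunk out to interested agents (sketch)."""
    stub = TrunkStub()
    stub.trunk_created(context, trunk)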
def trunk_by_port_provider(resource, port_id, context, **kwargs):
"""Provider callback to supply trunk information by parent port."""
return trunk_objects.Trunk.get_object(context, port_id=port_id)
class TrunkSkeleton(object):
"""Skeleton proxy code for agent->server communication."""
# API version history:
# 1.0 Initial version
target = oslo_messaging.Target(version='1.0',
namespace=constants.TRUNK_BASE_NAMESPACE)
_core_plugin = None
def __init__(self):
# Used to provide trunk lookups for the agent.
registry.provide(trunk_by_port_provider, resources.TRUNK)
self._connection = n_rpc.create_connection()
self._connection.create_consumer(
constants.TRUNK_BASE_TOPIC, [self], fanout=False)
self._connection.consume_in_threads()
@property
def core_plugin(self):
if not self._core_plugin:
self._core_plugin = manager.NeutronManager.get_plugin()
return self._core_plugin
@log_helpers.log_method_call
def update_subport_bindings(self, context, subports):
"""Update subport bindings to match trunk host binding."""
el = context.elevated()
ports_by_trunk_id = collections.defaultdict(list)
updated_ports = collections.defaultdict(list)
for s in subports:
ports_by_trunk_id[s['trunk_id']].append(s['port_id'])
for trunk_id, subport_ids in ports_by_trunk_id.items():
trunk = trunk_objects.Trunk.get_object(el, id=trunk_id)
if not trunk:
LOG.debug("Trunk not found. id: %s", trunk_id)
continue
trunk_updated_ports = self._process_trunk_subport_bindings(
el,
trunk,
subport_ids)
updated_ports[trunk.id].extend(trunk_updated_ports)
return updated_ports
def update_trunk_status(self, context, trunk_id, status):
"""Update the trunk status to reflect outcome of data plane wiring."""
with db_api.autonested_transaction(context.session):
trunk = trunk_objects.Trunk.get_object(context, id=trunk_id)
if trunk:
trunk.update(status=status)
def _process_trunk_subport_bindings(self, context, trunk, port_ids):
"""Process port bindings for subports on the given trunk."""
updated_ports = []
trunk_port_id = trunk.port_id
trunk_port = self.core_plugin.get_port(context, trunk_port_id)
trunk_host = trunk_port.get(portbindings.HOST_ID)
# NOTE(status_police) Set the trunk in BUILD state before processing
# subport bindings. The trunk will stay in BUILD state until an
# attempt has been made to bind all subports passed here and the
# agent acknowledges the operation was successful.
trunk.update(status=trunk_consts.BUILD_STATUS)
for port_id in port_ids:
try:
updated_port = self._handle_port_binding(context, port_id,
trunk, trunk_host)
# NOTE(fitoduarte): consider trimming down the content
# of the port data structure.
updated_ports.append(updated_port)
except trunk_exc.SubPortBindingError as e:
LOG.error(_LE("Failed to bind subport: %s"), e)
# NOTE(status_police) The subport binding has failed in a
# manner in which we cannot proceed and the user must take
# action to bring the trunk back to a sane state.
trunk.update(status=trunk_consts.ERROR_STATUS)
return []
except Exception as e:
msg = _LE("Failed to bind subport port %(port)s on trunk "
"%(trunk)s: %(exc)s")
LOG.error(msg, {'port': port_id, 'trunk': trunk.id, 'exc': e})
if len(port_ids) != len(updated_ports):
trunk.update(status=trunk_consts.DEGRADED_STATUS)
return updated_ports
def _handle_port_binding(self, context, port_id, trunk, trunk_host):
"""Bind the given port to the given host.
:param context: The context to use for the operation
:param port_id: The UUID of the port to be bound
:param trunk: The trunk that the given port belongs to
:param trunk_host: The host to bind the given port to
"""
port = self.core_plugin.update_port(
context, port_id,
{'port': {portbindings.HOST_ID: trunk_host,
'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}})
vif_type = port.get(portbindings.VIF_TYPE)
if vif_type == portbindings.VIF_TYPE_BINDING_FAILED:
raise trunk_exc.SubPortBindingError(port_id=port_id,
trunk_id=trunk.id)
return port
class TrunkStub(object):
"""Stub proxy code for server->agent communication."""
def __init__(self):
self._resource_rpc = resources_rpc.ResourcesPushRpcApi()
@log_helpers.log_method_call
def trunk_created(self, context, trunk):
"""Tell the agent about a trunk being created."""
self._resource_rpc.push(context, [trunk], events.CREATED)
@log_helpers.log_method_call
def trunk_deleted(self, context, trunk):
"""Tell the agent about a trunk being deleted."""
self._resource_rpc.push(context, [trunk], events.DELETED)
@log_helpers.log_method_call
def subports_added(self, context, subports):
"""Tell the agent about new subports to add."""
self._resource_rpc.push(context, subports, events.CREATED)
@log_helpers.log_method_call
def subports_deleted(self, context, subports):
"""Tell the agent about existing subports to remove."""
self._resource_rpc.push(context, subports, events.DELETED)
| sebrandon1/neutron | neutron/services/trunk/rpc/server.py | Python | apache-2.0 | 8,117 |
"""Unit tests for the javascript code."""
__author__ = 'John Orr ([email protected])'
import os
import subprocess
import unittest
import appengine_config
class AllJavaScriptTests(unittest.TestCase):
def karma_test(self, test_folder):
karma_conf = os.path.join(
appengine_config.BUNDLE_ROOT, 'tests', 'unit',
'javascript_tests', test_folder, 'karma.conf.js')
self.assertEqual(0, subprocess.call(['karma', 'start', karma_conf]))
def test_activity_generic(self):
self.karma_test('assets_lib_activity_generic')
def test_assessment_tags(self):
self.karma_test('modules_assessment_tags')
def test_butterbar(self):
self.karma_test('assets_lib_butterbar')
def test_certificate(self):
self.karma_test('modules_certificate')
def test_core_tags(self):
self.karma_test('modules_core_tags')
def test_dashboard(self):
self.karma_test('modules_dashboard')
def test_oeditor(self):
self.karma_test('modules_oeditor')
def test_questionnaire(self):
self.karma_test('modules_questionnaire')
def test_skill_map(self):
self.karma_test(os.path.join('modules_skill_map', 'lesson_editor'))
self.karma_test(
os.path.join('modules_skill_map', 'student_skill_widget'))
| daafgo/CourseBuilder-Xapi | tests/unit/javascript_tests.py | Python | apache-2.0 | 1,327 |
"""
Unit test for Treadmill linux runtime presence module.
"""
import os
import io
import shutil
import tempfile
import time
import unittest
from tests.testutils import mockzk
import mock
import kazoo
import kazoo.client
import yaml
import treadmill
from treadmill import exc
from treadmill import presence
from treadmill.apptrace import events
PROCCGROUPS = """#subsys_name hierarchy num_cgroups enabled
cpuset 6 1 1
cpu 7 1 1
cpuacct 7 1 1
memory 4 1 1
devices 3 20 1
freezer 8 1 1
net_cls 2 1 1
blkio 10 1 1
perf_event 11 1 1
hugetlb 9 1 1
pids 5 1 1
net_prio 2 1 1"""
PROCMOUNTS = """rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=239696k,nr_inodes=59924,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_prio,net_cls 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpuacct,cpu 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/VolGroup00-LogVol00 / xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
vagrant /vagrant vboxsf rw,nodev,relatime 0 0
home_centos_treadmill /home/centos/treadmill vboxsf rw,nodev,relatime 0 0
home_centos_treadmill-pid1 /home/centos/treadmill-pid1 vboxsf rw,nodev,relatime 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=50040k,mode=700,uid=1000,gid=1000 0 0""" # noqa: E501
original_open = open
def _open_side_effect(path, *args):
if path == '/proc/mounts':
return io.StringIO(PROCMOUNTS)
elif path == '/proc/cgroups':
return io.StringIO(PROCCGROUPS)
else:
return original_open(path, *args)
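# Illustrative sketch (not part of the original tests): how the side effect
# above can be applied inline with mock.patch instead of as a decorator; the
# helper name is hypothetical.
def _example_read_patched_proc_mounts():
    """Read /proc/mounts through the patched open() (sketch)."""
    with mock.patch('builtins.open', mock.Mock(side_effect=_open_side_effect)):
        with open('/proc/mounts') as proc_mounts:
            return proc_mounts.read()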
class PresenceTest(mockzk.MockZookeeperTestCase):
"""Mock test for treadmill.presence."""
def setUp(self):
self.root = tempfile.mkdtemp()
self.events_dir = os.path.join(self.root, 'appevents')
os.mkdir(self.events_dir)
self.zkclient = kazoo.client.KazooClient()
super(PresenceTest, self).setUp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('time.sleep', mock.Mock)
def test_registration(self):
"""Verifies presence registration."""
treadmill.sysinfo.hostname.return_value = 'myhostname'
manifest = {
'vip': {
'ip0': '192.168.0.1',
'ip1': '192.168.0.2'
},
'task': 't-0001',
'name': 'foo.test1',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'another_server'
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001,
},
{
'port': 8000,
'name': 'http',
'real_port': 5000,
}
]
}
app_presence = presence.EndpointPresence(self.zkclient, manifest)
app_presence.register_endpoints()
kazoo.client.KazooClient.create.assert_has_calls([
mock.call('/endpoints/foo/test1:tcp:ssh', b'myhostname:5001',
ephemeral=True, makepath=True, acl=mock.ANY,
sequence=False),
mock.call('/endpoints/foo/test1:tcp:http', b'myhostname:5000',
ephemeral=True, makepath=True, acl=mock.ANY,
sequence=False),
])
retry_happened = []
def node_exists(*_args, **_kwargs):
"""Simulate existence of ephemeral node."""
if retry_happened:
return
else:
retry_happened.append(1)
raise kazoo.client.NodeExistsError()
        kazoo.client.KazooClient.create.reset_mock()
kazoo.client.KazooClient.create.side_effect = node_exists
kazoo.client.KazooClient.get.return_value = ('{}', {})
app_presence.register_endpoints()
self.assertTrue(retry_happened)
self.assertTrue(time.sleep.called)
kazoo.client.KazooClient.create.assert_has_calls([
mock.call('/endpoints/foo/test1:tcp:ssh', b'myhostname:5001',
ephemeral=True, makepath=True, acl=mock.ANY,
sequence=False),
mock.call('/endpoints/foo/test1:tcp:http', b'myhostname:5000',
ephemeral=True, makepath=True, acl=mock.ANY,
sequence=False),
])
        kazoo.client.KazooClient.create.reset_mock()
kazoo.client.KazooClient.create.side_effect = (
kazoo.client.NodeExistsError
)
self.assertRaises(exc.ContainerSetupError,
app_presence.register_endpoints)
@mock.patch('kazoo.client.KazooClient.get', mock.Mock())
@mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
def test_kill(self):
"""Checks removal of the endpoints."""
zk_content = {
'running': {
'myproid.aaa': 'xxx.xx.com',
'myproid.bbb': 'yyy.xx.com'
},
'endpoints': {
'myproid': {
'aaa:tcp:http': 'xxx.xx.com:1234',
'bbb:tcp:http': 'yyy.xx.com:1234',
},
},
'servers': {
'xxx.xx.com': {},
},
'server.presence': {
'xxx.xx.com': {},
},
'placement': {
'xxx.xx.com': {
'myproid.aaa': {},
'myproid.bbb': {},
}
},
'scheduled': {
'myproid.aaa': {
'endpoints': [{'name': 'http', 'port': 8888}],
},
'myproid.bbb': {
'endpoints': [{'name': 'http', 'port': 8888}],
},
}
}
self.make_mock_zk(zk_content)
presence.kill_node(self.zkclient, 'xxx.xx.com')
# aaa running node is removed.
self.assertNotIn('myproid.aaa', zk_content['running'])
# bbb is not removed, as 'running' node has different hostname.
self.assertIn('myproid.bbb', zk_content['running'])
# Same for endpoints - aaa is removed, bbb is not.
self.assertNotIn('aaa:tcp:http', zk_content['endpoints']['myproid'])
self.assertIn('bbb:tcp:http', zk_content['endpoints']['myproid'])
self.assertNotIn('xxx.xx.com', zk_content['server.presence'])
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.presence.ServicePresence.report_running',
mock.Mock())
@mock.patch('time.time', mock.Mock(return_value=0))
@mock.patch('builtins.open', mock.Mock(side_effect=_open_side_effect))
def test_start_service(self):
"""Verifies restart/finish file interaction."""
manifest = {
'vip': {
'ip0': '192.168.0.1',
'ip1': '192.168.0.2'
},
'task': 't-0001',
'name': 'foo.test1',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'restart': {
'interval': 60,
'limit': 3
},
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001
},
{
'port': 8000,
'name': 'http',
'real_port': 5000
}
]
}
app_presence = presence.ServicePresence(
manifest,
container_dir=self.root,
appevents_dir=self.events_dir
)
self.assertTrue(app_presence.start_service('web_server'))
os.mkdir(os.path.join(self.root, 'services'))
os.mkdir(os.path.join(self.root, 'services', 'web_server'))
finished_file = os.path.join(self.root, 'services', 'web_server',
'finished')
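        # Each line appended to the 'finished' file is a
        # '<timestamp> <exit code> <signal>' record for one service exit.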
# App will be restarted, since it exits outside of its interval.
time.time.return_value = 1001
with open(finished_file, 'a+') as f:
f.write('1000 1 0\n')
self.assertTrue(app_presence.start_service('web_server'))
time.time.return_value = 2001
with open(finished_file, 'a+') as f:
f.write('2000 1 0\n')
self.assertTrue(app_presence.start_service('web_server'))
time.time.return_value = 3001
with open(finished_file, 'a+') as f:
f.write('3000 1 0\n')
self.assertTrue(app_presence.start_service('web_server'))
time.time.return_value = 4001
with open(finished_file, 'a+') as f:
f.write('4000 1 0\n')
self.assertTrue(app_presence.start_service('web_server'))
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
@mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
def test_report_running(self):
"""Verifies report running sequence."""
manifest = {
'vip': {
'ip0': '192.168.0.1',
'ip1': '192.168.0.2'
},
'task': 't-0001',
'name': 'foo.test1#0001',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001
},
{
'port': 8000,
'name': 'http',
'real_port': 5000
}
]
}
treadmill.sysinfo.hostname.return_value = 'server1.xx.com'
app_presence = presence.ServicePresence(
manifest,
container_dir=self.root,
appevents_dir=self.events_dir
)
kazoo.client.KazooClient.exists.return_value = False
app_presence.report_running('web_server')
treadmill.appevents.post.assert_called_with(
self.events_dir,
events.ServiceRunningTraceEvent(
instanceid='foo.test1#0001',
uniqueid='AAAAAA',
service='web_server'
)
)
kazoo.client.KazooClient.exists.return_value = True
app_presence.report_running('web_server')
treadmill.appevents.post.assert_called_with(
self.events_dir,
events.ServiceRunningTraceEvent(
instanceid='foo.test1#0001',
uniqueid='AAAAAA',
service='web_server'
)
)
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.subproc.call', mock.Mock())
@mock.patch('builtins.open', mock.Mock(side_effect=_open_side_effect))
def test_app_exit(self):
"""Verifies app deletion on service exit."""
manifest = {
'vip': {
'ip0': '192.168.0.1',
'ip1': '192.168.0.2'
},
'task': 't-0001',
'name': 'foo.test1#0001',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001
},
{
'port': 8000,
'name': 'http',
'real_port': 5000
}
]
}
services_dir = os.path.join(self.root, 'services')
os.mkdir(services_dir)
treadmill.sysinfo.hostname.return_value = 'server1.xx.com'
app_presence = presence.ServicePresence(
manifest,
container_dir=self.root,
appevents_dir=self.events_dir
)
app_presence.services['web_server']['last_exit'] = {
'rc': 1,
'sig': 3,
}
app_presence.exit_app('web_server')
self.assertTrue(os.path.exists(os.path.join(self.root, 'exitinfo')))
self.assertEqual(
yaml.load(open(os.path.join(self.root, 'exitinfo')).read()),
{'rc': 1,
'sig': 3,
'service': 'web_server',
'killed': False,
'oom': False}
)
del app_presence.services['web_server']['last_exit']
app_presence.exit_app('web_server')
self.assertTrue(os.path.exists(os.path.join(self.root, 'exitinfo')))
self.assertEqual(
yaml.load(open(os.path.join(self.root, 'exitinfo')).read()),
{'service': 'web_server',
'killed': False,
'oom': False}
)
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('time.time', mock.Mock(return_value=100))
@mock.patch('builtins.open', mock.Mock(side_effect=_open_side_effect))
def test_update_exit_status(self):
"""Verifies reading the finished file and updating task status."""
manifest = {
'vip': {
'ip0': '192.168.0.1',
'ip1': '192.168.0.2'
},
'task': 't-0001',
'name': 'foo.test1#0001',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001
},
{
'port': 8000,
'name': 'http',
'real_port': 5000
}
]
}
treadmill.sysinfo.hostname.return_value = 'server1.xx.com'
app_presence = presence.ServicePresence(manifest,
container_dir=self.root,
appevents_dir=self.events_dir)
os.mkdir(os.path.join(self.root, 'services'))
os.mkdir(os.path.join(self.root, 'services', 'web_server'))
finished_file = os.path.join(self.root, 'services', 'web_server',
'finished')
with open(finished_file, 'a+') as f:
f.write('1000 1 0\n')
app_presence.update_exit_status('web_server')
treadmill.appevents.post.assert_called_with(
self.events_dir,
events.ServiceExitedTraceEvent(
instanceid='foo.test1#0001',
uniqueid='AAAAAA',
service='web_server',
rc=1,
signal=0
)
)
kazoo.client.KazooClient.create.reset_mock()
with open(finished_file, 'a+') as f:
f.write('2000 9 255\n')
app_presence.update_exit_status('web_server')
treadmill.appevents.post.assert_called_with(
self.events_dir,
events.ServiceExitedTraceEvent(
instanceid='foo.test1#0001',
uniqueid='AAAAAA',
service='web_server',
rc=9,
signal=255
)
)
reported_file = os.path.join(self.root, 'services', 'web_server',
'reported')
self.assertTrue(os.path.exists(reported_file))
# Calling update state twice is no-op, as reported file is newer.
kazoo.client.KazooClient.create.reset_mock()
app_presence.update_exit_status('web_server')
self.assertFalse(kazoo.client.KazooClient.create.called)
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.presence.ServicePresence.report_running',
mock.Mock())
@mock.patch('time.time', mock.Mock(return_value=None))
@mock.patch('builtins.open', mock.Mock(side_effect=_open_side_effect))
def test_restart_rate(self):
"""Verifies reading the finished file and updating task status."""
manifest = {
'task': 't-0001',
'name': 'foo.test1#0001',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 5
}
},
],
}
treadmill.sysinfo.hostname.return_value = 'server1.xx.com'
app_presence = presence.ServicePresence(
manifest,
container_dir=self.root,
appevents_dir=self.events_dir
)
os.mkdir(os.path.join(self.root, 'services'))
os.mkdir(os.path.join(self.root, 'services', 'web_server'))
finished_file = os.path.join(self.root, 'services', 'web_server',
'finished')
time.time.return_value = 1059
# Five restarts in less than 60 sec, service should not be restarted
with open(finished_file, 'w') as f:
f.write('1000 1 0\n')
f.write('1001 1 0\n')
f.write('1002 1 0\n')
f.write('1003 1 0\n')
f.write('1059 1 0\n')
self.assertFalse(app_presence.start_service('web_server'))
        # Oldest of the last five restarts was 105 sec ago (outside the 60 sec
        # window), so the service should be restarted
time.time.return_value = 1105
with open(finished_file, 'w') as f:
f.write('1000 1 0\n')
f.write('1101 1 0\n')
f.write('1102 1 0\n')
f.write('1103 1 0\n')
f.write('1104 1 0\n')
self.assertTrue(app_presence.start_service('web_server'))
time.time.return_value = 2000
        # Last restart was more than 30 sec ago, service should be restarted
manifest['services'][0]['restart'] = {'limit': 1, 'interval': 30}
with open(finished_file, 'w') as f:
f.write('1000 1 0\n')
f.write('1950 1 0\n')
self.assertTrue(app_presence.start_service('web_server'))
        # Last restart was less than 30 sec ago, service should *not* be restarted
with open(finished_file, 'w') as f:
f.write('1000 1 0\n')
f.write('1001 1 0\n')
f.write('1980 1 0\n')
self.assertFalse(app_presence.start_service('web_server'))
# Confirm that limit: 0 does not allow *any* exit
manifest['services'][0]['restart'] = {'limit': 0, 'interval': 60}
time.time.return_value = 2000
with open(finished_file, 'w') as f:
f.write('1000 1 0\n')
f.write('1001 1 0\n')
f.write('1002 1 0\n')
f.write('1003 1 0\n')
f.write('1004 1 0\n')
self.assertFalse(app_presence.start_service('web_server'))
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('treadmill.cgroups.get_mountpoint',
mock.Mock(return_value='/cgroups'))
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_exit_info(self):
"""Tests collection of exit info."""
manifest = {
'vip': {
'ip0': '192.168.0.1',
'ip1': '192.168.0.2'
},
'task': 't-0001',
'name': 'foo.test1#0001',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001
},
{
'port': 8000,
'name': 'http',
'real_port': 5000
}
]
}
os.mkdir(os.path.join(self.root, 'services'))
os.mkdir(os.path.join(self.root, 'services', 'web_server'))
finished_file = os.path.join(self.root, 'services', 'web_server',
'finished')
with open(finished_file, 'a+') as f:
f.write('1000 1 0\n')
app_presence = presence.ServicePresence(
manifest,
container_dir=self.root,
appevents_dir=self.events_dir
)
ws_svc_dir = os.path.join(self.root, 'services', 'web_server')
einfo, count = app_presence.exit_info(ws_svc_dir)
self.assertEqual(1, count)
self.assertEqual(1, einfo['rc'])
self.assertEqual(0, einfo['sig'])
self.assertFalse(einfo['oom'])
with open(finished_file, 'a+') as f:
f.write('1001 255 9\n')
einfo, count = app_presence.exit_info(ws_svc_dir)
self.assertEqual(2, count)
self.assertEqual(255, einfo['rc'])
self.assertEqual(9, einfo['sig'])
self.assertFalse(einfo['oom'])
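        # Simulate an OOM notification: is_oom() reads a value through open(),
        # and the mocked read of '1' is expected to be reported as OOM.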
open_name = 'builtins.open'
with mock.patch(open_name, mock.mock_open()) as mock_open:
file_mock = mock.MagicMock(spec=io.IOBase)
file_mock.__enter__.return_value.read.return_value = '1'
mock_open.return_value = file_mock
self.assertTrue(presence.is_oom())
if __name__ == '__main__':
unittest.main()
| ThoughtWorksInc/treadmill | tests/presence_test.py | Python | apache-2.0 | 27,098 |
from lino.api import dd, _
from lino.utils.mldbc.mixins import BabelDesignated
class Expression(BabelDesignated):
class Meta:
verbose_name = _('Expression')
verbose_name_plural = _('Expressions')
class Expressions(dd.Table):
model = Expression
column_names = 'id designation *'
| lino-framework/book | lino_book/projects/de_BE/models.py | Python | bsd-2-clause | 313 |
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSDateFormatter (TestCase):
def testOutput(self):
formatter = NSDateFormatter.alloc().init()
formatter.setDateFormat_("yyyy/mm/dd")
self.assertResultIsBOOL(NSDateFormatter.getObjectValue_forString_range_error_)
self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 0)
self.assertArgIsInOut(NSDateFormatter.getObjectValue_forString_range_error_, 2)
self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 3)
ok, val, range, err = formatter.getObjectValue_forString_range_error_(
None, "2008/10/12", NSRange(0, 10), None)
self.assertTrue(ok)
self.assertIsInstance(val, NSDate)
        self.assertEqual(range, NSRange(0, 10))
self.assertIs(err, None)
self.assertResultIsBOOL(NSDateFormatter.getObjectValue_forString_range_error_)
self.assertArgIsInOut(NSDateFormatter.getObjectValue_forString_range_error_, 2)
self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 3)
def testConstants(self):
self.assertEqual(NSDateFormatterNoStyle, kCFDateFormatterNoStyle)
self.assertEqual(NSDateFormatterShortStyle, kCFDateFormatterShortStyle)
self.assertEqual(NSDateFormatterMediumStyle, kCFDateFormatterMediumStyle)
self.assertEqual(NSDateFormatterLongStyle, kCFDateFormatterLongStyle)
self.assertEqual(NSDateFormatterFullStyle, kCFDateFormatterFullStyle)
self.assertEqual(NSDateFormatterBehaviorDefault, 0)
self.assertEqual(NSDateFormatterBehavior10_0, 1000)
self.assertEqual(NSDateFormatterBehavior10_4, 1040)
def testMethods(self):
self.assertResultIsBOOL(NSDateFormatter.generatesCalendarDates)
self.assertArgIsBOOL(NSDateFormatter.setGeneratesCalendarDates_, 0)
self.assertResultIsBOOL(NSDateFormatter.isLenient)
self.assertArgIsBOOL(NSDateFormatter.setLenient_, 0)
self.assertResultIsBOOL(NSDateFormatter.isLenient)
self.assertArgIsBOOL(NSDateFormatter.initWithDateFormat_allowNaturalLanguage_, 1)
self.assertResultIsBOOL(NSDateFormatter.allowsNaturalLanguage)
@min_os_level('10.6')
def testMethods10_6(self):
self.assertResultIsBOOL(NSDateFormatter.doesRelativeDateFormatting)
self.assertArgIsBOOL(NSDateFormatter.setDoesRelativeDateFormatting_, 0)
if __name__ == "__main__":
main()
| albertz/music-player | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsdateformatter.py | Python | bsd-2-clause | 2,500 |
import os
import angr
import angr.analyses.decompiler
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_smoketest():
p = angr.Project(os.path.join(test_location, 'x86_64', 'all'), auto_load_libs=False)
cfg = p.analyses.CFG(normalize=True)
main_func = cfg.kb.functions['main']
_ = p.analyses.RegionIdentifier(main_func)
if __name__ == "__main__":
test_smoketest()
| angr/angr | tests/test_regionidentifier.py | Python | bsd-2-clause | 459 |
SECRET_KEY = 'bla'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'XXX',
'USER': 'XXX',
'PASSWORD': 'XXX',
'HOST': 'XXX',
        'PORT': 'XXX'
}
}
INSTALLED_APPS = (
'django_extensions',
'app',
)
| mkuiack/tkp | documentation/devref/database/schema/settings.py | Python | bsd-2-clause | 293 |
#!/usr/bin/env python
'''
@author David Stuebe
@file pyon/core/interceptor/test/test_msgpack_numpy_hook.py
@description test for raw msgpack hook
'''
from nose.tools import *
import unittest
import collections
import time
import numpy
import random
from msgpack import packb, unpackb
import hashlib
from pyon.core.interceptor.encode import encode_ion, decode_ion
def sha1(buf):
return hashlib.sha1(buf).hexdigest().upper()
count = 0
class PackRunBase(object):
_decoder = None
_encoder = None
types = collections.OrderedDict(
[
('boolean',('bool',random.randint,(0, 1)) ),
('|S1',('|S1', lambda o: chr(count) , (None,) ) ),
('|S16',('|S16', lambda o: chr(count)*16 , (None,) ) ),
('int8',('int8',random.randint,(-(1 << 7), (1 << 7)-1)) ),
('int16',('int16',random.randint,(-(1 << 15), (1 << 15)-1)) ),
('int32',('int32',random.randint,(-(1 << 31), (1 << 31)-1)) ),
('int64',('int64',random.randint,(-(1 << 63), (1 << 63)-1)) ),
('uint8',('uint8',random.randint,(0, (1 << 8)-1)) ),
('uint16',('uint16',random.randint,(0, (1 << 16)-1)) ),
('uint32',('uint32',random.randint,(0, (1 << 32)-1)) ),
('uint64',('uint64',random.randint,(0, (1 << 64)-1)) ),
('float16_eps',('float16',lambda o: numpy.float16("1.0")+o ,(numpy.finfo('float16').eps,)) ),
('float16_epsneg',('float16',lambda o: 1-o ,(numpy.finfo('float16').epsneg,)) ),
('float16',('float16',numpy.random.uniform,(numpy.finfo('float16').min, numpy.finfo('float16').max)) ),
('float32_eps',('float32',lambda o: 1+o ,(numpy.finfo('float32').eps,)) ),
('float32_epsneg',('float32',lambda o: 1-o ,(numpy.finfo('float32').epsneg,)) ),
('float32',('float32',numpy.random.uniform,(numpy.finfo('float32').min, numpy.finfo('float32').max)) ),
('float64_eps',('float64',lambda o: 1+o ,(numpy.finfo('float64').eps,)) ),
('float64_epsneg',('float64',lambda o: 1-o ,(numpy.finfo('float64').epsneg,)) ),
('float64',('float64',numpy.random.uniform,(numpy.finfo('float64').min, numpy.finfo('float64').max)) ),
('complex64',('complex64',lambda a,b: numpy.complex(numpy.random.uniform(a,b), numpy.random.uniform(a,b)) ,(numpy.finfo('float32').min, numpy.finfo('float32').max)) ),
('complex128',('complex128',lambda a,b: numpy.complex(numpy.random.uniform(a,b), numpy.random.uniform(a,b)) ,(numpy.finfo('float64').min, numpy.finfo('float64').max)) ),
('object',('object',lambda o: {count:chr(count)*8}, (None,)))
]
)
shapes = ((1,),(3,4), (9,12,18), (10,10,10,10),)
#shapes = ((100,100,10,10),)
def __init__(self, *args, **kwargs):
self._decoder = decode_ion
self._encoder = encode_ion
def test_all(self):
for shape in self.shapes:
print "========================"
print "========================"
print "========================"
for type_name,(type, func, args) in self.types.iteritems():
print "Running type: %s, shape: %s" % (type_name, str(shape))
self.run_it(self._encoder, self._decoder, type, func, args, shape)
def run_it(self, encoder, decoder, type, func, args, shape):
array = numpy.zeros(shape, type)
count = 0
for x in numpy.nditer(array, flags=['refs_ok'], op_flags=['readwrite']):
            count += 1
x[...] = func(*args)
tic = time.time()
msg = packb(array, default=encoder)
new_array = unpackb(msg,object_hook=decoder)
toc = time.time() - tic
print 'Binary Size: "%d", Time: %s' % (len(msg), toc)
assert_true((array == new_array).all())
        if type != 'object':
# Do a second check - based on sha1...
assert_equals(sha1(array.tostring()), sha1(new_array.tostring()))
class NumpyMsgPackTestCase(unittest.TestCase, PackRunBase ):
def __init__(self,*args, **kwargs):
unittest.TestCase.__init__(self,*args, **kwargs)
PackRunBase.__init__(self,*args, **kwargs)
if __name__ == '__main__':
pb = PackRunBase()
pb.test_all()
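# Illustrative round trip outside the test harness, using only the hooks
# imported above (a sketch, not part of the test suite):
#
#     data = numpy.arange(10, dtype='int32')
#     buf = packb(data, default=encode_ion)
#     restored = unpackb(buf, object_hook=decode_ion)
#     assert (data == restored).all()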
| crchemist/scioncc | src/pyon/core/interceptor/test/test_msgpack_numpy_hook.py | Python | bsd-2-clause | 4,296 |
#
# JpHelp.py -- Jupyter web notebook help routines.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import time
import random
import datetime
import threading
from tornado.ioloop import IOLoop
from ginga.misc import Bunch, Callback, log
default_timer_interval_msec = 10
class TimerFactory(object):
"""
As long as Jupyter notebooks use Tornado we can count on using the
tornado io loop to help us implement a timer. But if that ever changes
we need to update this class to use a new mechanism.
"""
def __init__(self, logger=None):
self.timer_lock = threading.RLock()
self.timer_cnt = 0
self.timer = {}
self.base_interval_msec = default_timer_interval_msec
self._timeout = None
if logger is None:
# substitute a null logger if user didn't provide one
logger = log.get_logger(name='timerfactory', null=True,
level=50)
self.logger = logger
def wind(self):
# randomize the first timeout so we don't get every timer
# expiring at the same time
interval = random.randint(1, self.base_interval_msec) # nosec
delta = datetime.timedelta(milliseconds=interval)
self._timeout = IOLoop.current().add_timeout(delta, self.timer_tick)
def timer_tick(self):
"""Callback executed every self.base_interval_msec to check timer
expirations.
"""
# TODO: should exceptions thrown from this be caught and ignored
self.process_timers()
delta = datetime.timedelta(milliseconds=self.base_interval_msec)
self._timeout = IOLoop.current().add_timeout(delta, self.timer_tick)
def process_timers(self):
self.logger.debug("check timers")
funcs = []
with self.timer_lock:
for key, bnch in self.timer.items():
if (bnch.deadline is not None) and \
(time.time() >= bnch.deadline):
bnch.deadline = None
funcs.append(bnch.func)
for func in funcs:
try:
func()
except Exception as e:
pass
# self.logger.debug("update should have been called.")
def add_timer(self, func):
with self.timer_lock:
if self._timeout is None:
self.wind()
name = self.timer_cnt
self.timer_cnt += 1
timer = Bunch.Bunch(deadline=None, func=func, name=name)
self.timer[name] = timer
return timer
def remove_timer(self, timer):
with self.timer_lock:
name = timer.name
del self.timer[name]
def reset_timer(self, timer, time_sec):
with self.timer_lock:
if timer not in self.timer:
self.timer[timer.name] = timer
self.logger.debug("setting timer...")
timer.deadline = time.time() + time_sec
default_timer_factory = TimerFactory()
class Timer(Callback.Callbacks):
"""Abstraction of a GUI-toolkit implemented timer."""
def __init__(self, duration=0.0, timer_factory=None):
"""Create a timer set to expire after `duration` sec.
"""
super(Timer, self).__init__()
if timer_factory is None:
timer_factory = default_timer_factory
self.timer_factory = timer_factory
self.duration = duration
        # For storing arbitrary data with timers
self.data = Bunch.Bunch()
self._timer = self.timer_factory.add_timer(self._redirect_cb)
for name in ('expired', 'canceled'):
self.enable_callback(name)
def start(self, duration=None):
"""Start the timer. If `duration` is not None, it should
specify the time to expiration in seconds.
"""
if duration is None:
duration = self.duration
self.set(duration)
def set(self, duration):
self.stop()
self.timer_factory.reset_timer(self._timer, duration)
def _redirect_cb(self):
self.make_callback('expired')
def stop(self):
try:
self.timer_factory.remove_timer(self._timer)
except Exception:
pass
def cancel(self):
"""Cancel this timer. If the timer is not running, there
is no error.
"""
self.stop()
self.make_callback('canceled')
clear = cancel
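# Illustrative usage sketch (assumes the module-level default_timer_factory and
# a running Tornado IO loop, e.g. inside a Jupyter notebook):
#
#     def _on_expired(timer):
#         print('timer expired:', timer.data.get('label'))
#
#     t = Timer(duration=1.0)
#     t.add_callback('expired', _on_expired)
#     t.data.label = 'demo'
#     t.start()    # fires roughly 1 second later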
| pllim/ginga | ginga/web/jupyterw/JpHelp.py | Python | bsd-3-clause | 4,515 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ThreadedComment'
db.create_table('threadedcomments_comment', (
('comment_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['comments.Comment'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.TextField')(blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='children', null=True, blank=True, to=orm['threadedcomments.ThreadedComment'])),
('last_child', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['threadedcomments.ThreadedComment'], null=True, blank=True)),
('tree_path', self.gf('django.db.models.fields.TextField')(db_index=True)),
))
db.send_create_signal('threadedcomments', ['ThreadedComment'])
def backwards(self, orm):
# Deleting model 'ThreadedComment'
db.delete_table('threadedcomments_comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'threadedcomments.threadedcomment': {
'Meta': {'ordering': "('tree_path',)", 'object_name': 'ThreadedComment', 'db_table': "'threadedcomments_comment'", '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'last_child': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['threadedcomments.ThreadedComment']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'blank': 'True', 'to': "orm['threadedcomments.ThreadedComment']"}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tree_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
}
}
complete_apps = ['threadedcomments'] | PolicyStat/django-threadedcomments | threadedcomments/migrations/0001_initial.py | Python | bsd-3-clause | 7,192 |
# -*- coding: utf-8 -*-
import logging
from django.db import models, transaction
from south.db import db
from south.v2 import DataMigration
logger = logging.getLogger(__name__)
class Migration(DataMigration):
def forwards(self, orm):
logger.debug("Migrating Issue abstracts...")
n = 0
for i in orm['issues.Issue'].objects.filter(is_published=True
).exclude(abstract=None):
with transaction.commit_on_success():
agenda_items = orm['meetings.AgendaItem'].objects.filter(
issue=i)
assert agenda_items.count() > 0
ai = agenda_items.order_by('meeting__created_at')[0]
ai.background = i.abstract
ai.save()
i.abstract = None
i.save()
n += 1
logger.debug("...migrated %d Issue abstracts" % n)
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'communities.community': {
'Meta': {'object_name': 'Community'},
'board_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'default_quorum': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'official_identifier': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'referendum_ends_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'referendum_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'referendum_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'straw_voting_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'38oy3fdq1k0pvwmbfg27uwhy'", 'unique': 'True', 'max_length': '24'}),
'upcoming_meeting_comments': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'upcoming_meeting_participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['users.OCUser']"}),
'upcoming_meeting_published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_summary': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'upcoming_meeting_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'voting_ends_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'issues.issue': {
'Meta': {'ordering': "['order_in_upcoming_meeting', 'title']", 'object_name': 'Issue'},
'abstract': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'calculated_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['communities.Community']"}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues_created'", 'to': u"orm['users.OCUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'length_in_minutes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'order_in_upcoming_meeting': ('django.db.models.fields.IntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'x3th2xm1ijkmc5psxsj5s27h'", 'unique': 'True', 'max_length': '24'})
},
u'meetings.agendaitem': {
'Meta': {'unique_together': "(('meeting', 'issue'),)", 'object_name': 'AgendaItem'},
'background': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agenda'", 'to': u"orm['meetings.Meeting']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'})
},
u'meetings.meeting': {
'Meta': {'ordering': "('-held_at',)", 'object_name': 'Meeting'},
'agenda_items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['issues.Issue']", 'symmetrical': 'False', 'through': u"orm['meetings.AgendaItem']", 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': u"orm['communities.Community']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings_created'", 'to': u"orm['users.OCUser']"}),
'guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'held_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participated_in_meeting'", 'symmetrical': 'False', 'through': u"orm['meetings.MeetingParticipant']", 'to': u"orm['users.OCUser']"}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'7vl340o6jzj1tkst2bydbhpq'", 'unique': 'True', 'max_length': '24'})
},
u'meetings.meetingexternalparticipant': {
'Meta': {'object_name': 'MeetingExternalParticipant'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['meetings.Meeting']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'meetings.meetingparticipant': {
'Meta': {'unique_together': "(('meeting', 'ordinal'), ('meeting', 'user'))", 'object_name': 'MeetingParticipant'},
'default_group_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participations'", 'to': u"orm['meetings.Meeting']"}),
'ordinal': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participations'", 'to': u"orm['users.OCUser']"})
},
u'users.ocuser': {
'Meta': {'object_name': 'OCUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
}
}
complete_apps = ['meetings']
symmetrical = True
| nonZero/OpenCommunity | src/meetings/south_migrations/0007_update_agendaitem_background.py | Python | bsd-3-clause | 12,710 |
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from scipy._build_utils.system_info import get_info
lapack_opt = get_info('lapack_opt', notfound_action=2)
config = Configuration('interpolate', parent_package, top_path)
fitpack_src = [join('fitpack', '*.f')]
config.add_library('fitpack', sources=fitpack_src)
config.add_extension('interpnd',
sources=['interpnd.c'])
config.add_extension('_ppoly',
sources=['_ppoly.c'],
**lapack_opt)
config.add_extension('_bspl',
sources=['_bspl.c'],
libraries=['fitpack'],
depends=['src/__fitpack.h'] + fitpack_src)
config.add_extension('_fitpack',
sources=['src/_fitpackmodule.c'],
libraries=['fitpack'],
depends=(['src/__fitpack.h','src/multipack.h']
+ fitpack_src)
)
config.add_extension('dfitpack',
sources=['src/fitpack.pyf'],
libraries=['fitpack'],
depends=fitpack_src,
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| Eric89GXL/scipy | scipy/interpolate/setup.py | Python | bsd-3-clause | 1,571 |
"""
:author: Gary Ruben, 2009
:license: modified BSD
"""
__all__ = ["frt2", "ifrt2"]
import numpy as np
from numpy import roll, newaxis
def frt2(a):
"""Compute the 2-dimensional finite radon transform (FRT) for an n x n
integer array.
Parameters
----------
a : array_like
A 2-D square n x n integer array.
Returns
-------
FRT : 2-D ndarray
Finite Radon Transform array of (n+1) x n integer coefficients.
See Also
--------
ifrt2 : The two-dimensional inverse FRT.
Notes
-----
The FRT has a unique inverse iff n is prime. [FRT]
The idea for this algorithm is due to Vlad Negnevitski.
Examples
--------
Generate a test image:
Use a prime number for the array dimensions
>>> SIZE = 59
>>> img = np.tri(SIZE, dtype=np.int32)
Apply the Finite Radon Transform:
>>> f = frt2(img)
References
----------
.. [FRT] A. Kingston and I. Svalbe, "Projective transforms on periodic
discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
and Electron Physics, 139 (2006)
"""
if a.ndim != 2 or a.shape[0] != a.shape[1]:
raise ValueError("Input must be a square, 2-D array")
ai = a.copy()
n = ai.shape[0]
f = np.empty((n + 1, n), np.uint32)
f[0] = ai.sum(axis=0)
for m in range(1, n):
        # Roll each row of ai left by its row index
for row in range(1, n):
ai[row] = roll(ai[row], -row)
f[m] = ai.sum(axis=0)
f[n] = ai.sum(axis=1)
return f
def ifrt2(a):
"""Compute the 2-dimensional inverse finite radon transform (iFRT) for
an (n+1) x n integer array.
Parameters
----------
a : array_like
A 2-D (n+1) row x n column integer array.
Returns
-------
iFRT : 2-D n x n ndarray
Inverse Finite Radon Transform array of n x n integer coefficients.
See Also
--------
frt2 : The two-dimensional FRT
Notes
-----
The FRT has a unique inverse iff n is prime.
See [1]_ for an overview.
The idea for this algorithm is due to Vlad Negnevitski.
Examples
--------
>>> SIZE = 59
>>> img = np.tri(SIZE, dtype=np.int32)
Apply the Finite Radon Transform:
>>> f = frt2(img)
Apply the Inverse Finite Radon Transform to recover the input
>>> fi = ifrt2(f)
Check that it's identical to the original
>>> assert len(np.nonzero(img-fi)[0]) == 0
References
----------
.. [1] A. Kingston and I. Svalbe, "Projective transforms on periodic
discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
and Electron Physics, 139 (2006)
"""
if a.ndim != 2 or a.shape[0] != a.shape[1] + 1:
raise ValueError("Input must be an (n+1) row x n column, 2-D array")
ai = a.copy()[:-1]
n = ai.shape[1]
f = np.empty((n, n), np.uint32)
f[0] = ai.sum(axis=0)
for m in range(1, n):
        # Roll each row of ai right by its row index.
for row in range(1, ai.shape[0]):
ai[row] = roll(ai[row], row)
f[m] = ai.sum(axis=0)
f += a[-1][newaxis].T
f = (f - ai[0].sum()) / n
return f
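# Round-trip sketch combining the docstring examples above (illustrative only):
#
#     import numpy as np
#     size = 59                          # prime, so the FRT is invertible
#     img = np.tri(size, dtype=np.int32)
#     assert np.array_equal(ifrt2(frt2(img)), img)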
| chintak/scikit-image | skimage/transform/finite_radon_transform.py | Python | bsd-3-clause | 3,206 |
from django.db.models import Q
from rest_framework import generics, serializers
from kitsune.gallery.models import Image
from kitsune.sumo.api import (
LocaleNegotiationMixin, InequalityFilterBackend, DateTimeUTCField, ImageUrlField)
class ImageShortSerializer(serializers.ModelSerializer):
url = ImageUrlField(source='file')
class Meta(object):
model = Image
fields = ('id', 'title', 'url', 'locale', 'width', 'height')
class ImageDetailSerializer(ImageShortSerializer):
created = DateTimeUTCField(read_only=True)
updated = DateTimeUTCField(read_only=True)
updated_by = serializers.SlugRelatedField(slug_field='username')
class Meta(ImageShortSerializer.Meta):
fields = ImageShortSerializer.Meta.fields + (
'created', 'updated', 'updated_by', 'description', 'is_draft',
'creator')
class ImageList(LocaleNegotiationMixin, generics.ListAPIView):
"""List all image ids."""
queryset = Image.objects.all()
serializer_class = ImageShortSerializer
paginate_by = 100
filter_fields = ['height', 'width']
filter_backends = [InequalityFilterBackend]
def get_queryset(self):
not_is_draft = Q(is_draft=None) | Q(is_draft=False)
queryset = self.queryset.filter(not_is_draft)
# locale may come from the Accept-language header, but it can be
# overridden via the query string.
locale = self.get_locale()
locale = self.request.QUERY_PARAMS.get('locale', locale)
if locale is not None:
queryset = queryset.filter(locale=locale)
return queryset
class ImageDetail(generics.RetrieveAPIView):
queryset = Image.objects.all()
serializer_class = ImageDetailSerializer
| YOTOV-LIMITED/kitsune | kitsune/gallery/api.py | Python | bsd-3-clause | 1,747 |
from django.db import models
from publisher.query import PublisherQuerySet
class PublisherManager(models.Manager):
"""Manager with some support handling publisher.
"""
def get_query_set(self):
"""Change standard model queryset to our own.
"""
return PublisherQuerySet(self.model)
def drafts(self):
return self.filter(publisher_is_draft=True)
def public(self):
return self.filter(publisher_is_draft=False)
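    # Usage sketch: a model whose default manager is PublisherManager can
    # select rows with MyModel.objects.drafts() or MyModel.objects.public()
    # instead of calling all() directly (see the note below).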
"""
def all(self):
raise NotImplementedError, ("Calling all() on manager of publisher "
"object is not allowed. Please use drafts() or public() method "
"instead. If this isn't accident use get_query_set().all() for "
"all instances.")
""" | emiquelito/django-cms-2.0 | publisher/manager.py | Python | bsd-3-clause | 777 |
import os
import skimage as si
import skimage.io as sio
import numpy as np
from numpy.testing import (
assert_array_equal, assert_array_almost_equal, run_module_suite)
from tempfile import NamedTemporaryFile
_plugins = sio.plugin_order()
sio.use_plugin('tifffile')
np.random.seed(0)
def teardown():
sio.reset_plugins()
def test_imread_uint16():
expected = np.load(os.path.join(si.data_dir, 'chessboard_GRAY_U8.npy'))
img = sio.imread(os.path.join(si.data_dir, 'chessboard_GRAY_U16.tif'))
assert img.dtype == np.uint16
assert_array_almost_equal(img, expected)
def test_imread_uint16_big_endian():
expected = np.load(os.path.join(si.data_dir, 'chessboard_GRAY_U8.npy'))
img = sio.imread(os.path.join(si.data_dir, 'chessboard_GRAY_U16B.tif'))
assert img.dtype == np.uint16
assert_array_almost_equal(img, expected)
class TestSave:
def roundtrip(self, dtype, x):
f = NamedTemporaryFile(suffix='.tif')
fname = f.name
f.close()
sio.imsave(fname, x)
y = sio.imread(fname)
assert_array_equal(x, y)
def test_imsave_roundtrip(self):
for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]:
for dtype in (np.uint8, np.uint16, np.float32, np.int16,
np.float64):
x = np.random.rand(*shape)
if not np.issubdtype(dtype, float):
x = (x * np.iinfo(dtype).max).astype(dtype)
else:
x = x.astype(dtype)
yield self.roundtrip, dtype, x
if __name__ == "__main__":
run_module_suite()
| newville/scikit-image | skimage/io/tests/test_tifffile.py | Python | bsd-3-clause | 1,620 |
"""This module contains auxiliary functions for augmenting the ELFI graph."""
from functools import partial, reduce
from operator import add, mul
from toolz.functoolz import compose
from elfi.model.elfi_model import NodeReference, Operation
from elfi.utils import args_to_tuple
def add_pdf_gradient_nodes(model, log=False, nodes=None):
"""Add gradient nodes for distribution nodes to the model.
Returns the node names.
By default this gives the pdfs of the generated model parameters.
Parameters
----------
model : elfi.ElfiModel
log : bool, optional
Use gradient of logpdf, default False.
nodes : list, optional
List of distribution node names. Default is `model.parameters`.
Returns
-------
gradients : list
List of gradient node names.
"""
nodes = nodes or model.parameter_names
gradattr = 'gradient_pdf' if log is False else 'gradient_logpdf'
grad_nodes = _add_distribution_nodes(model, nodes, gradattr)
return [g.name for g in grad_nodes]
# TODO: check that there are no latent variables. See model.utils.ModelPrior
def add_pdf_nodes(model, joint=True, log=False, nodes=None):
"""Add pdf nodes for distribution nodes to the model.
Returns the node names.
By default this gives the pdfs of the generated model parameters.
Parameters
----------
model : elfi.ElfiModel
joint : bool, optional
        If True (default) return the joint pdf of the priors.
log : bool, optional
Use logpdf, default False.
nodes : list, optional
List of distribution node names. Default is `model.parameters`.
Returns
-------
pdfs : list
List of node names. Either only the joint pdf node name or the separate pdf node
names depending on the `joint` argument.
"""
nodes = nodes or model.parameter_names
pdfattr = 'pdf' if log is False else 'logpdf'
pdfs = _add_distribution_nodes(model, nodes, pdfattr)
if joint:
if log:
return [add_reduce_node(model, pdfs, add, '_joint_{}*'.format(pdfattr))]
else:
return [add_reduce_node(model, pdfs, mul, '_joint_{}*'.format(pdfattr))]
else:
return [pdf.name for pdf in pdfs]
def _add_distribution_nodes(model, nodes, attr):
distribution_nodes = []
for n in nodes:
node = model[n]
op = getattr(node.distribution, attr)
distribution_nodes.append(
Operation(op, *([node] + node.parents), model=model, name='_{}_{}'.format(n, attr)))
return distribution_nodes
def add_reduce_node(model, nodes, reduce_operation, name):
"""Reduce the output from a collection of nodes.
Parameters
----------
model : elfi.ElfiModel
nodes : list
Either a list of node names or a list of node reference objects
reduce_operation : callable
name : str
Name for the reduce node
Returns
-------
name : str
name of the new node
"""
name = '_reduce*' if name is None else name
nodes = [n if isinstance(n, NodeReference) else model[n] for n in nodes]
op = Operation(
compose(partial(reduce, reduce_operation), args_to_tuple), *nodes, model=model, name=name)
return op.name
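# Usage sketch, assuming `m` is an elfi.ElfiModel whose parameters have prior
# distribution nodes (illustrative only):
#
#     joint_logpdf = add_pdf_nodes(m, joint=True, log=True)[0]
#     grad_logpdfs = add_pdf_gradient_nodes(m, log=True)
#
# The returned names refer to ordinary nodes in the model graph and can be
# generated or evaluated like any other node.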
| HIIT/elfi | elfi/model/augmenter.py | Python | bsd-3-clause | 3,269 |
"""Provider traversal tests."""
from dependency_injector import providers
def test_traversal_overriding():
provider1 = providers.Provider()
provider2 = providers.Provider()
provider3 = providers.Provider()
provider = providers.Provider()
provider.override(provider1)
provider.override(provider2)
provider.override(provider3)
all_providers = list(provider.traverse())
assert len(all_providers) == 3
assert provider1 in all_providers
assert provider2 in all_providers
assert provider3 in all_providers
def test_traversal_overriding_nested():
provider1 = providers.Provider()
provider2 = providers.Provider()
provider2.override(provider1)
provider3 = providers.Provider()
provider3.override(provider2)
provider = providers.Provider()
provider.override(provider3)
all_providers = list(provider.traverse())
assert len(all_providers) == 3
assert provider1 in all_providers
assert provider2 in all_providers
assert provider3 in all_providers
def test_traverse_types_filtering():
provider1 = providers.Resource(dict)
provider2 = providers.Resource(dict)
provider3 = providers.Provider()
provider = providers.Provider()
provider.override(provider1)
provider.override(provider2)
provider.override(provider3)
all_providers = list(provider.traverse(types=[providers.Resource]))
assert len(all_providers) == 2
assert provider1 in all_providers
assert provider2 in all_providers
| ets-labs/python-dependency-injector | tests/unit/providers/traversal/test_provider_py3.py | Python | bsd-3-clause | 1,525 |
"""
If the arbitrary constant class from issue 1336 is ever implemented, this
should serve as a set of test cases.
"""
from sympy import (acos, cos, cosh, Eq, exp, Function, I, Integral, log, Pow,
S, sin, sinh, sqrt, Symbol)
from sympy.solvers.ode import constant_renumber, constantsimp
from sympy.utilities.pytest import XFAIL
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
C1 = Symbol('C1')
C2 = Symbol('C2')
C3 = Symbol('C3')
f = Function('f')
def test_constant_mul():
# We want C1 (Constant) below to absorb the y's, but not the x's
assert constant_renumber(constantsimp(y*C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(x*C1, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*y, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1*x, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(2*C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1*2, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(y*C1*x, x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(x*y*C1, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(y*x*C1, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*y*(y + 1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(y*C1*(y + 1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(x*(y*C1), x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(x*(C1*y), x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*(x*y), x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp((x*y)*C1, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp((y*x)*C1, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(y*(y + 1)*C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1*x*y, x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(x*C1*y, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp((C1*x)*y, x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(y*(x*C1), x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp((x*C1)*y, x, 1), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*x*y*x*y*2, x, 1), 'C', 1, 1) == C1*x**2
assert constant_renumber(constantsimp(C1*x*y*z, x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(C1*x*y**2*sin(z), x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(C1*C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1*C2, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2*C2, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1*C1*C2, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1*x*2**x, x, 1), 'C', 1, 1) == C1*x*2**x
def test_constant_add():
assert constant_renumber(constantsimp(C1 + C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + 2, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(2 + C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + y, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + x, x, 1), 'C', 1, 1) == C1 + x
assert constant_renumber(constantsimp(C1 + x + y + x*y + 2, x, 1), 'C', 1, 1) == \
C1 + x*(y + 1)
assert constant_renumber(constantsimp(C1 + x + 2**x + y + 2, x, 1), 'C', 1, 1) == \
C1 + x + 2**x
assert constant_renumber(constantsimp(C1 + C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + C2, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2 + C1, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1 + C2 + C1, x, 2), 'C', 1, 2) == C1
def test_constant_power_as_base():
assert constant_renumber(constantsimp(C1**C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(Pow(C1,C1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**C2, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2**C1, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2**C2, x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1**y, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**x, x, 1), 'C', 1, 1) == C1**x
assert constant_renumber(constantsimp(C1**2, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**(x*y), x, 1), 'C', 1, 1) == C1**(x*y)
def test_constant_power_as_exp():
assert constant_renumber(constantsimp(x**C1, x, 1), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp(y**C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(x**y**C1, x, 1), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp((x**y)**C1, x, 1), 'C', 1, 1) == (x**y)**C1
assert constant_renumber(constantsimp(x**(y**C1), x, 1), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp(x**C1**y, x, 1), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp(x**(C1**y), x, 1), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp((x**C1)**y, x, 1), 'C', 1, 1) == (x**C1)**y
assert constant_renumber(constantsimp(2**C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(S(2)**C1, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(exp(C1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(exp(C1+x), x, 1), 'C', 1, 1) == C1*exp(x)
assert constant_renumber(constantsimp(Pow(2, C1), x, 1), 'C', 1, 1) == C1
def test_constant_function():
assert constant_renumber(constantsimp(sin(C1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(f(C1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(f(C1, C1), x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(f(C1, C2), x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C2, C1), x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C2, C2), x, 2), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C1, x), x, 1), 'C', 1, 2) == f(C1, x)
assert constant_renumber(constantsimp(f(C1, y), x, 1), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(y, C1), x, 1), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C1, y, C2), x, 2), 'C', 1, 2) == C1
@XFAIL
def test_constant_function_multiple():
# The rules to not renumber in this case would be too complicated, and
# dsolve is not likely to ever encounter anything remotely like this.
assert constant_renumber(constantsimp(f(C1, C1, x), x, 1), 'C', 1, 1) == f(C1, C1, x)
def test_constant_multiple():
assert constant_renumber(constantsimp(C1*2 + 2, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(x*2/C1, x, 1), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(C1**2*2 + 2, x, 1), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(sin(2*C1) + x + sqrt(2), x, 1), 'C', 1, 1) == C1 + x
assert constant_renumber(constantsimp(2*C1 + C2, x, 2), 'C', 1, 2) == C1
def test_ode_solutions():
# only a few examples here, the rest will be tested in the actual dsolve tests
assert constant_renumber(constantsimp(C1*exp(2*x)+exp(x)*(C2+C3), x, 3), 'C', 1, 3) == \
constant_renumber((C1*exp(x) + C2*exp(2*x)), 'C', 1, 2)
assert constant_renumber(constantsimp(Eq(f(x),I*C1*sinh(x/3) + C2*cosh(x/3)), x, 2),
'C', 1, 2) == constant_renumber(Eq(f(x), C1*sinh(x/3) + C2*cosh(x/3)), 'C', 1, 2)
assert constant_renumber(constantsimp(Eq(f(x),acos((-C1)/cos(x))), x, 1), 'C', 1, 1) == \
Eq(f(x),acos(C1/cos(x)))
assert constant_renumber(constantsimp(Eq(log(f(x)/C1) + 2*exp(x/f(x)), 0), x, 1),
'C', 1, 1) == Eq(log(C1*f(x)) + 2*exp(x/f(x)), 0)
assert constant_renumber(constantsimp(Eq(log(x*sqrt(2)*sqrt(1/x)*sqrt(f(x))\
/C1) + x**2/(2*f(x)**2), 0), x, 1), 'C', 1, 1) == \
Eq(log(C1*x*sqrt(1/x)*sqrt(f(x))) + x**2/(2*f(x)**2), 0)
assert constant_renumber(constantsimp(Eq(-exp(-f(x)/x)*sin(f(x)/x)/2 + log(x/C1) - \
cos(f(x)/x)*exp(-f(x)/x)/2, 0), x, 1), 'C', 1, 1) == \
Eq(-exp(-f(x)/x)*sin(f(x)/x)/2 + log(C1*x) - cos(f(x)/x)*exp(-f(x)/x)/2, 0)
u2 = Symbol('u2')
_a = Symbol('_a')
assert constant_renumber(constantsimp(Eq(-Integral(-1/(sqrt(1 - u2**2)*u2), \
(u2, _a, x/f(x))) + log(f(x)/C1), 0), x, 1), 'C', 1, 1) == \
Eq(-Integral(-1/(u2*sqrt(1 - u2**2)), (u2, _a, x/f(x))) + \
log(C1*f(x)), 0)
assert [constant_renumber(constantsimp(i, x, 1), 'C', 1, 1) for i in
[Eq(f(x), sqrt(-C1*x + x**2)), Eq(f(x), -sqrt(-C1*x + x**2))]] == \
[Eq(f(x), sqrt(x*(C1 + x))), Eq(f(x), -sqrt(x*(C1 + x)))]
def test_constant_Eq():
# C1 on the rhs is well-tested, but the lhs is only tested here
assert constantsimp(Eq(C1, 3 + f(x)*x), x, 1) == Eq(C1, f(x)*x)
| flacjacket/sympy | sympy/solvers/tests/test_constantsimp.py | Python | bsd-3-clause | 8,997 |
"""
:mod:`disco.settings` -- Disco Settings
=======================================
Settings can be specified in a Python file and/or using environment variables.
Settings specified in environment variables override those stored in a file.
The default settings are intended to make it easy to get Disco running on a single node.
:command:`make install` will create a more reasonable settings file for a cluster environment,
and put it in ``/etc/disco/settings.py``
Disco looks in the following places for a settings file:
- The settings file specified using the command line utility
``--settings`` option.
- ``~/.disco``
- ``/etc/disco/settings.py``
Possible settings for Disco are as follows:
.. envvar:: DISCO_DATA
Directory to use for writing data.
Default obtained using ``os.path.join(DISCO_ROOT, data)``.
.. envvar:: DISCO_DEBUG
Sets the debugging level for Disco.
        Default is ``off``.
.. envvar:: DISCO_ERLANG
Command used to launch Erlang on all nodes in the cluster.
Default usually ``erl``, but depends on the OS.
.. envvar:: DISCO_EVENTS
If set, events are logged to `stdout`.
If set to ``json``, events will be written as JSON strings.
If set to ``nocolor``, ANSI color escape sequences will not be used, even if the terminal supports it.
Default is unset (the empty string).
.. envvar:: DISCO_FLAGS
Default is the empty string.
.. envvar:: DISCO_HOME
The directory which Disco runs out of.
If you run Disco out of the source directory,
you shouldn't need to change this.
If you use ``make install`` to install Disco,
it will be set properly for you in ``/etc/disco/settings.py``.
.. envvar:: DISCO_HTTPD
Command used to launch `lighttpd`.
Default is ``lighttpd``.
.. envvar:: DISCO_MASTER_HOME
Directory containing the Disco ``master`` directory.
Default is obtained using ``os.path.join(DISCO_HOME, 'master')``.
.. envvar:: DISCO_MASTER_HOST
The hostname of the master.
Default obtained using ``socket.gethostname()``.
.. envvar:: DISCO_MASTER_ROOT
Directory to use for writing master data.
Default obtained using ``os.path.join(DISCO_DATA, '_%s' % DISCO_NAME)``.
.. envvar:: DISCO_MASTER_CONFIG
Directory to use for writing cluster configuration.
Default obtained using ``os.path.join(DISCO_ROOT, '%s.config' % DISCO_NAME)``.
.. envvar:: DISCO_NAME
A unique name for the Disco cluster.
Default obtained using ``'disco_%s' % DISCO_PORT``.
.. envvar:: DISCO_LOG_DIR
Directory where log-files are created.
The same path is used for all nodes in the cluster.
Default is obtained using ``os.path.join(DISCO_ROOT, 'log')``.
.. envvar:: DISCO_PID_DIR
Directory where pid-files are created.
The same path is used for all nodes in the cluster.
Default is obtained using ``os.path.join(DISCO_ROOT, 'run')``.
.. envvar:: DISCO_PORT
The port the workers use for `HTTP` communication.
Default is ``8989``.
.. envvar:: DISCO_ROOT
Root directory for Disco-written data and metadata.
Default is obtained using ``os.path.join(DISCO_HOME, 'root')``.
.. envvar:: DISCO_ROTATE_LOG
Whether to rotate the master log on startup.
Default is ``False``.
.. envvar:: DISCO_USER
The user Disco should run as.
Default obtained using ``os.getenv(LOGNAME)``.
.. envvar:: DISCO_JOB_OWNER
User name shown on the job status page for the user who
submitted the job.
Default is the login name @ host.
.. envvar:: DISCO_WWW_ROOT
Directory that is the document root for the master `HTTP` server.
Default obtained using ``os.path.join(DISCO_MASTER_HOME, www)``.
.. envvar:: DISCO_GC_AFTER
How long to wait before garbage collecting job-generated intermediate and result data.
     Only results explicitly saved to DDFS won't be garbage collected.
Default is ``100 * 365 * 24 * 60 * 60`` (100 years). (Note that this setting does not affect data in DDFS.)
.. envvar:: DISCO_PROFILE
Whether Disco should start profiling applications and send profiling data to
a graphite server.
.. envvar:: GRAPHITE_HOST
If DISCO_PROFILE is set, then some performance data from Disco
will be sent to the graphite host. The default is localhost.
We are assuming that the listening port is the default graphite
port.
.. envvar:: SYSTEMD_ENABLED
This adds -noshell to the erlang process. It provides compatibility for running
disco using a non-forking process type in the service definition.
.. envvar:: DISCO_WORKER_MAX_MEM
How much memory can be used by worker in total. Worker calls `resource.setrlimit(RLIMIT_AS, limit) <http://docs.python.org/library/resource.html#resource.setrlimit>`_ to set the limit when it starts. Can be either a percentage of total available memory or an exact number of bytes. Note that ``setrlimit`` behaves differently on Linux and Mac OS X, see *man setrlimit* for more information. Default is ``80%`` i.e. 80% of the total available memory.
Settings to control the proxying behavior:
.. envvar:: DISCO_PROXY_ENABLED
If set, enable proxying through the master. This is a master-side setting (set in ``master:/etc/disco/settings.py``).
Default is ``''``.
.. envvar:: DISCO_PROXY
The address of the proxy to use on the client side. This is in the format ``http://<proxy-host>:<proxy-port>``, where ``<proxy-port>`` normally matches the value of ``DISCO_PROXY_PORT`` set on the master.
Default is ``''``.
.. envvar:: DISCO_PROXY_PORT
The port the master proxy should run on. This is master-side setting (set in ``master:/etc/disco/settings.py``).
Default is ``8999``.
Settings to control the scheduler behavior:
.. envvar:: DISCO_SCHEDULER
The type of scheduler that disco should use.
The only options are `fair` and `fifo`.
Default is ``fair``.
.. envvar:: DISCO_SCHEDULER_ALPHA
Parameter controlling how much the ``fair`` scheduler punishes long-running jobs vs. short ones.
Default is .001 and should usually not need to be changed.
Settings used by the testing environment:
.. envvar:: DISCO_TEST_DISCODB
Whether or not to run :mod:`discodb` tests.
Default is ``''``.
.. envvar:: DISCO_TEST_HOST
The hostname that the test data server should bind on.
Default is ``DISCO_MASTER_HOST``.
.. envvar:: DISCO_TEST_PORT
The port that the test data server should bind to.
Default is ``9444``.
Settings used by DDFS:
.. envvar:: DDFS_ROOT
.. deprecated:: 0.4
Use :envvar:`DDFS_DATA` instead.
    Only provided as a default for backwards compatibility.
Default is obtained using ``os.path.join(DISCO_ROOT, 'ddfs')``.
.. envvar:: DDFS_DATA
The root data directory for DDFS.
Default is obtained using ``DDFS_ROOT``.
.. envvar:: DDFS_PUT_PORT
The port to use for writing to DDFS nodes.
Must be open to the Disco client unless proxying is used.
Default is ``8990``.
.. envvar:: DDFS_PUT_MAX
The maximum default number of retries for a `PUT` operation.
Default is ``3``.
.. envvar:: DDFS_GET_MAX
The maximum default number of retries for a `GET` operation.
Default is ``3``.
.. envvar:: DDFS_READ_TOKEN
The default read authorization token to use.
Default is ``None``.
.. envvar:: DDFS_WRITE_TOKEN
The default write authorization token to use.
Default is ``None``.
.. envvar:: DDFS_GC_INITIAL_WAIT
The amount of time to wait after startup before running GC (in minutes).
Default is ``''``, which triggers an internal default of 5 minutes.
.. envvar:: DDFS_GC_BALANCE_THRESHOLD
The distance a node's disk utilization can be from the average
disk utilization of the cluster before the node is considered
to be over-utilized or under-utilized. Default is ``0.1``.
.. envvar:: DDFS_PARANOID_DELETE
Instead of deleting unneeded files, DDFS garbage collector prefixes obsolete files with ``!trash.``, so they can be safely verified/deleted by an external process. For instance, the following command can be used to finally delete the files (assuming that ``DDFS_DATA = "/srv/disco/ddfs"``)::
find /srv/disco/ddfs/ -perm 600 -iname '!trash*' -exec rm {} \;
Default is ``''``.
The following settings are used by DDFS to determine the number of replicas for data/metadata to keep
(it is not recommended to use the provided defaults in a multinode cluster):
.. envvar:: DDFS_TAG_MIN_REPLICAS
The minimum number of replicas for a tag operation to succeed.
Default is ``1``.
.. envvar:: DDFS_TAG_REPLICAS
The number of replicas of tags that DDFS should aspire to keep.
Default is ``1``.
.. envvar:: DDFS_BLOB_REPLICAS
The number of replicas of blobs that DDFS should aspire to keep.
Default is ``1``.
.. envvar:: DDFS_SPACE_AWARE
Whether DDFS should take the amount of free space in the nodes
        into account when choosing the nodes to write to. Default is
        ``''``.
.. envvar:: DDFS_ABSOLUTE_SPACE
Only effective in the space-aware mode.
If set, the nodes with the higher absolute free space will be
given precedence for hosting replicas. If unset, the nodes with
the highest ratio of the free space to the total space will be
given precedence for hosting the replicas.
"""
import os, socket, pwd
from clx.settings import Settings
class DiscoSettings(Settings):
defaults = {
'DISCO_DATA': "os.path.join(DISCO_ROOT, 'data')",
'DISCO_DEBUG': "'off'",
'DISCO_ERLANG': "guess_erlang()",
'DISCO_EVENTS': "''",
'DISCO_FLAGS': "''",
'DISCO_HOME': "guess_home()",
'DISCO_HTTPD': "'lighttpd -f $DISCO_PROXY_CONFIG'",
'DISCO_MASTER': "'http://%s:%s' % (DISCO_MASTER_HOST, DISCO_PORT)",
'DISCO_MASTER_HOME': "os.path.join(DISCO_HOME, 'master')",
'DISCO_MASTER_HOST': "socket.gethostname()",
'DISCO_MASTER_ROOT': "os.path.join(DISCO_DATA, '_%s' % DISCO_NAME)",
'DISCO_MASTER_CONFIG': "os.path.join(DISCO_ROOT, '%s.config' % DISCO_NAME)",
'DISCO_NAME': "'disco_%s' % DISCO_PORT",
'DISCO_LOG_DIR': "os.path.join(DISCO_ROOT, 'log')",
'DISCO_PID_DIR': "os.path.join(DISCO_ROOT, 'run')",
'DISCO_PORT': "8989",
'DISCO_ROOT': "os.path.join(DISCO_HOME, 'root')",
'DISCO_ROTATE_LOG': "False",
'DISCO_SETTINGS_FILE': "guess_settings()",
'DISCO_WORKER_MAX_MEM': "'80%'",
'DISCO_ULIMIT': "16000000",
'DISCO_USER': "os.getenv('LOGNAME')",
'DISCO_JOB_OWNER': "job_owner()",
'DISCO_WWW_ROOT': "os.path.join(DISCO_MASTER_HOME, 'www')",
# GC
'DISCO_GC_AFTER': "100 * 365 * 24 * 60 * 60",
#'PROFILE'
'DISCO_PROFILE': "'False'",
'GRAPHITE_HOST': "'localhost'",
# OTHER
'SYSTEMD_ENABLED': "False",
# PROXY
'DISCO_PROXY_ENABLED': "''",
'DISCO_PROXY': "''",
'DISCO_PROXY_PORT': "8999",
'DISCO_PROXY_PID': "os.path.join(DISCO_ROOT, '%s-proxy.pid' % DISCO_NAME)",
'DISCO_PROXY_CONFIG': "os.path.join(DISCO_ROOT, '%s-proxy.conf' % DISCO_NAME)",
# SCHEDULER
'DISCO_SCHEDULER': "'fair'",
'DISCO_SCHEDULER_ALPHA': ".001",
# TESTING
'DISCO_TEST_DISCODB': "''",
'DISCO_TEST_HOST': "socket.gethostname()",
'DISCO_TEST_PORT': "9444",
'DISCO_TEST_PROFILE': "''",
'DISCO_TEST_PURGE': "'purge'",
# DDFS
'DDFS_SPACE_AWARE': "''",
'DDFS_ABSOLUTE_SPACE': "''",
'DDFS_ROOT': "os.path.join(DISCO_ROOT, 'ddfs')",
'DDFS_DATA': "DDFS_ROOT",
'DDFS_PUT_PORT': "8990",
'DDFS_PUT_MAX': "3",
'DDFS_GET_MAX': "3",
'DDFS_READ_TOKEN': "None",
'DDFS_WRITE_TOKEN': "None",
'DDFS_TAG_MIN_REPLICAS': "1",
'DDFS_TAG_REPLICAS': "1",
'DDFS_BLOB_REPLICAS': "1",
'DDFS_PARANOID_DELETE': "''",
'DDFS_GC_INITIAL_WAIT': "''",
'DDFS_GC_BALANCE_THRESHOLD': "0.1"
}
globals = globals()
must_exist = ('DISCO_DATA',
'DISCO_ROOT',
'DISCO_MASTER_HOME',
'DISCO_MASTER_ROOT',
'DISCO_LOG_DIR',
'DISCO_PID_DIR',
'DDFS_DATA')
settings_file_var = 'DISCO_SETTINGS_FILE'
def ensuredirs(self):
for name in self.must_exist:
self.safedir(name)
config = self['DISCO_MASTER_CONFIG']
        try:
            import multiprocessing
            nCpus = multiprocessing.cpu_count()
        except Exception:
            nCpus = 1
        if not os.path.exists(config):
            with open(config, 'w') as config_file:
                config_file.write('[["localhost","{}"]]'.format(nCpus))
def job_owner():
return "%s@%s" % (pwd.getpwuid(os.getuid()).pw_name,
socket.gethostname())
def guess_erlang():
if os.uname()[0] == 'Darwin' and int(os.uname()[2].split('.')[0]) < 14:
return '/usr/libexec/StartupItemContext erl'
return 'erl'
def guess_home():
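    # Infer DISCO_HOME from this module's location; only a source checkout
    # (marked by a .disco-home file two levels up) is accepted, otherwise the
    # user must set DISCO_HOME explicitly.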
from disco.error import DiscoError
from disco.fileutils import get_valid_path
disco_lib = os.path.dirname(get_valid_path(__file__))
disco_home = os.path.dirname(os.path.dirname(disco_lib))
if os.path.exists(os.path.join(disco_home, '.disco-home')):
return disco_home
raise DiscoError("DISCO_HOME is not specified, where should Disco live?")
def guess_settings():
for settings_file in (os.path.expanduser('~/.disco'),
'/etc/disco/settings.py'):
if os.path.exists(settings_file):
return settings_file
return ''
| mozilla/disco | lib/disco/settings.py | Python | bsd-3-clause | 15,713 |
"""
#;+
#; NAME:
#; xdebug
#; Version 1.0
#;
#; PURPOSE:
#;  Module of simple debugging utilities (tracing, printing, quick plots)
#; 01-Nov-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from pdb import *
from xastropy.xutils.printing import printcol as xpcol
from xastropy.plotting.simple import plot_1d_arrays as xplot
from xastropy.plotting.simple import plot_hist as xhist
""" Didn't work
def pyqt_trace():
from PyQt4.QtCore import pyqtRemoveInputHook
pyqtRemoveInputHook()
set_trace()
"""
| alfeinsod/xastropy | xastropy/xutils/xdebug.py | Python | bsd-3-clause | 541 |
# -*- test-case-name: vumi.components.tests.test_message_formatters -*-
from csv import writer
from zope.interface import Interface, implements
class IMessageFormatter(Interface):
""" Interface for writing messages to an HTTP request. """
def add_http_headers(request):
"""
Add any needed HTTP headers to the request.
Often used to set the Content-Type header.
"""
def write_row_header(request):
"""
Write any header bytes that need to be written to the request before
messages.
"""
def write_row(request, message):
"""
Write a :class:`TransportUserMessage` to the request.
"""
class JsonFormatter(object):
""" Formatter for writing messages to requests as JSON. """
implements(IMessageFormatter)
def add_http_headers(self, request):
resp_headers = request.responseHeaders
resp_headers.addRawHeader(
'Content-Type', 'application/json; charset=utf-8')
def write_row_header(self, request):
pass
def write_row(self, request, message):
request.write(message.to_json())
request.write('\n')
class CsvFormatter(object):
""" Formatter for writing messages to requests as CSV. """
implements(IMessageFormatter)
FIELDS = (
'timestamp',
'message_id',
'to_addr',
'from_addr',
'in_reply_to',
'session_event',
'content',
'group',
)
def add_http_headers(self, request):
resp_headers = request.responseHeaders
resp_headers.addRawHeader(
'Content-Type', 'text/csv; charset=utf-8')
def write_row_header(self, request):
writer(request).writerow(self.FIELDS)
def write_row(self, request, message):
writer(request).writerow([
self._format_field(field, message) for field in self.FIELDS])
def _format_field(self, field, message):
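        # Dispatch to a field-specific formatter such as _format_field_timestamp
        # when one exists; otherwise use the default pass-through formatting.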
field_formatter = getattr(self, '_format_field_%s' % (field,), None)
if field_formatter is not None:
field_value = field_formatter(message)
else:
field_value = self._format_field_default(field, message)
return field_value.encode('utf-8')
def _format_field_default(self, field, message):
return message[field] or u''
def _format_field_timestamp(self, message):
return message['timestamp'].isoformat()
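# Illustrative usage sketch (not part of the original module): a resource picks a
# formatter and streams messages through it, e.g.
#
#   formatter = CsvFormatter()
#   formatter.add_http_headers(request)
#   formatter.write_row_header(request)
#   for message in messages:
#       formatter.write_row(request, message)
#
# where `request` is assumed to be a twisted.web Request and `messages` yields
# TransportUserMessage objects.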
| TouK/vumi | vumi/components/message_formatters.py | Python | bsd-3-clause | 2,440 |
from wtforms import Form
from tests import MultiDict
from wtforms_alchemy import null_or_unicode, SelectField
class TestSelectField(object):
def test_understands_none_values(self):
class MyForm(Form):
choice_field = SelectField(
choices=[('', '-- Choose --'), ('choice 1', 'Something')],
coerce=null_or_unicode
)
form = MyForm(MultiDict({'choice_field': u''}))
form.validate()
assert form.errors == {}
| kelvinhammond/wtforms-alchemy | tests/test_custom_fields.py | Python | bsd-3-clause | 498 |
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE
"""
CLI program for managing Metatab files
"""
import json
import sys
from genericpath import exists
from metatab import DEFAULT_METATAB_FILE, MetatabDoc, parse_app_url
from rowgenerators.util import get_cache, clean_cache
from os.path import dirname
from rowgenerators.util import fs_join as join
import logging
logger = logging.getLogger('user')
logger_err = logging.getLogger('cli-errors')
debug_logger = logging.getLogger('debug')
cache = get_cache()
def metatab():
import argparse
parser = argparse.ArgumentParser(
prog='metatab',
description='Matatab file parser',
epilog='Cache dir: {}\n'.format(str(cache.getsyspath('/') ) ))
g = parser.add_mutually_exclusive_group()
g.add_argument('-C', '--create', action='store', nargs='?', default=False,
help="Create a new metatab file, from named template. With no argument, uses the 'metatab' template ")
g.add_argument('-t', '--terms', default=False, action='store_const', dest='out_type', const='terms',
help='Parse a file and print out the stream of terms, before interpretation')
g.add_argument('-j', '--json', default=False, action='store_const', dest='out_type', const='json',
help='Parse a file and print out a JSON representation')
g.add_argument('-y', '--yaml', default=False, action='store_const', dest='out_type', const='yaml',
help='Parse a file and print out a YAML representation')
g.add_argument('-l', '--line', default=False, action='store_const', dest='out_type', const='line',
help='Parse a file and print out a Metatab Line representation')
g.add_argument('-c', '--csv', default=False, action='store_const', dest='out_type', const='csv',
help='Parse a file and print out a Metatab Line representation')
    g.add_argument('-p', '--pretty', default=False, action='store_const', dest='out_type', const='pretty',
help='Pretty print the python Dict representation ')
parser.add_argument('-W', '--write-in-place',
help='When outputting as yaml, json, csv or line, write the file instead of printing it, '
'to a file with same base name and appropriate extension ', action='store_true')
parser.set_defaults(out_type='csv')
parser.add_argument('-f', '--find-first',
help='Find and print the first value for a fully qualified term name')
parser.add_argument('-d', '--show-declaration', default=False, action='store_true',
help='Parse a declaration file and print out declaration dict. Use -j or -y for the format')
parser.add_argument('file', nargs='?', default=DEFAULT_METATAB_FILE, help='Path to a Metatab file')
cli_init()
args = parser.parse_args(sys.argv[1:])
    # Specifying only a fragment screws up setting the default metadata file name
if args.file.startswith('#'):
args.file = DEFAULT_METATAB_FILE + args.file
if args.create is not False:
if new_metatab_file(args.file, args.create):
prt("Created ", args.file)
else:
warn("File",args.file,'already exists.')
exit(0)
metadata_url = parse_app_url(args.file, proto='metatab')
try:
doc = MetatabDoc(metadata_url, cache=cache)
except IOError as e:
err("Failed to open '{}': {}".format(metadata_url, e))
    def write_or_print(t):
        ext = 'txt' if args.out_type == 'line' else args.out_type
        if args.write_in_place:
            if metadata_url.scheme != 'file':
                err("Can only use -W with local files")
            with metadata_url.fspath.with_suffix('.' + ext).open('w') as f:
                f.write(t)
        else:
            print(t)
if args.show_declaration:
decl_doc = MetatabDoc('', cache=cache, decl=metadata_url.path)
d = {
'terms': decl_doc.decl_terms,
'sections': decl_doc.decl_sections
}
if args.out_type == 'json':
print(json.dumps(d, indent=4))
elif args.out_type == 'yaml':
import yaml
print(yaml.safe_dump(d, default_flow_style=False, indent=4))
elif args.find_first:
t = doc.find_first(args.find_first)
print(t.value)
elif args.out_type == 'terms':
for t in doc._term_parser:
print(t)
elif args.out_type == 'json':
write_or_print(json.dumps(doc.as_dict(), indent=4))
elif args.out_type == 'yaml':
import yaml
from collections import OrderedDict
def ordered_dump(data, stream=None, Dumper=yaml.Dumper, **kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
write_or_print(ordered_dump(doc.as_dict(), default_flow_style=False, indent=4, Dumper=yaml.SafeDumper))
elif args.out_type == 'line':
write_or_print(doc.as_lines())
elif args.out_type == 'csv':
write_or_print(doc.as_csv())
    elif args.out_type == 'pretty':
from pprint import pprint
pprint(doc.as_dict())
exit(0)
def cli_init(log_level=logging.INFO):
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(message)s'))
out_hdlr.setLevel(log_level)
logger.addHandler(out_hdlr)
logger.setLevel(log_level)
out_hdlr = logging.StreamHandler(sys.stderr)
out_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
out_hdlr.setLevel(logging.WARN)
logger_err.addHandler(out_hdlr)
logger_err.setLevel(logging.WARN)
def prt(*args, **kwargs):
logger.info(' '.join(str(e) for e in args),**kwargs)
def warn(*args, **kwargs):
logger_err.warn(' '.join(str(e) for e in args),**kwargs)
def err(*args, **kwargs):
logger_err.critical(' '.join(str(e) for e in args),**kwargs)
sys.exit(1)
def make_metatab_file(template='metatab'):
import metatab.templates as tmpl
template_path = join(dirname(tmpl.__file__),template+'.csv')
doc = MetatabDoc(template_path)
return doc
def new_metatab_file(mt_file, template):
template = template if template else 'metatab'
if not exists(mt_file):
doc = make_metatab_file(template)
doc.write_csv(mt_file)
return True
else:
return False
def get_table(doc, name):
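    """Return the Root.Table term for *name*, exiting with an error that lists the available tables if it is missing."""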
t = doc.find_first('Root.Table', value=name)
if not t:
table_names = ["'" + t.value + "'" for t in doc.find('Root.Table')]
if not table_names:
table_names = ["<No Tables>"]
err("Did not find schema for table name '{}' Tables are: {}"
.format(name, " ".join(table_names)))
return t
| CivicKnowledge/metatab | metatab/cli.py | Python | bsd-3-clause | 7,253 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Utilities for using modules
"""
import frappe, os
import frappe.utils
from frappe import _
lower_case_files_for = ['DocType', 'Page', 'Report',
"Workflow", 'Module Def', 'Desktop Item', 'Workflow State', 'Workflow Action', 'Print Format']
def scrub(txt):
return frappe.scrub(txt)
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
ndt, ndn = dt, dn
if dt in lower_case_files_for:
ndt, ndn = scrub(dt), scrub(dn)
return ndt, ndn
def get_module_path(module):
"""Returns path of the given module"""
return frappe.get_module_path(module)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, force=True):
from frappe.modules.import_file import import_files
return import_files(module, dt, dn, force=force)
def export_doc(doctype, name, module=None):
"""Write a doc to standard path."""
from frappe.modules.export_file import write_document_file
if not module: module = frappe.db.get_value(doctype, name, 'module')
write_document_file(frappe.get_doc(doctype, name), module)
def get_doctype_module(doctype):
"""Returns **Module Def** name of given doctype."""
return frappe.db.get_value('DocType', doctype, 'module') or "core"
doctype_python_modules = {}
def load_doctype_module(doctype, module=None, prefix=""):
"""Returns the module object for given doctype."""
if not module:
module = get_doctype_module(doctype)
app = get_module_app(module)
key = (app, doctype, prefix)
if key not in doctype_python_modules:
doctype_python_modules[key] = frappe.get_module(get_module_name(doctype, module, prefix))
return doctype_python_modules[key]
def get_module_name(doctype, module, prefix="", app=None):
return '{app}.{module}.doctype.{doctype}.{prefix}{doctype}'.format(\
app = scrub(app or get_module_app(module)),
module = scrub(module),
doctype = scrub(doctype),
prefix=prefix)
def get_module_app(module):
return frappe.local.module_app[scrub(module)]
def get_app_publisher(module):
app = frappe.local.module_app[scrub(module)]
if not app:
frappe.throw(_("App not found"))
app_publisher = frappe.get_hooks(hook="app_publisher", app_name=app)[0]
return app_publisher
def make_boilerplate(template, doc, opts=None):
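    # Write a boilerplate file (e.g. a controller template) for *doc* into its
    # doctype folder, substituting app publisher, class name and doctype; an
    # existing file is left untouched.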
target_path = get_doc_path(doc.module, doc.doctype, doc.name)
template_name = template.replace("controller", scrub(doc.name))
target_file_path = os.path.join(target_path, template_name)
app_publisher = get_app_publisher(doc.module)
if not os.path.exists(target_file_path):
if not opts:
opts = {}
with open(target_file_path, 'w') as target:
with open(os.path.join(get_module_path("core"), "doctype", scrub(doc.doctype),
"boilerplate", template), 'r') as source:
target.write(source.read().format(app_publisher=app_publisher,
classname=doc.name.replace(" ", ""), doctype=doc.name, **opts))
| gangadharkadam/letzfrappe | frappe/modules/__init__.py | Python | mit | 3,117 |
# coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
except ImportError:
psutil = None
_KEY_MAPPING = [
'MemAvailable',
'MemTotal',
'MemFree',
'Buffers',
'Cached',
'Active',
'Dirty',
'Inactive',
'Shmem',
'SwapTotal',
'SwapFree',
'SwapCached',
'VmallocTotal',
'VmallocUsed',
'VmallocChunk',
'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
PROC = '/proc/meminfo'
def get_default_config_help(self):
config_help = super(MemoryCollector, self).get_default_config_help()
config_help.update({
'detailed': 'Set to True to Collect all the nodes',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemoryCollector, self).get_default_config()
config.update({
'path': 'memory',
# Collect all the nodes or just a few standard ones?
# Uncomment to enable
# 'detailed': 'True'
})
return config
def collect(self):
"""
Collect memory stats
"""
if os.access(self.PROC, os.R_OK):
file = open(self.PROC)
data = file.read()
file.close()
for line in data.splitlines():
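                # Each line looks like "MemTotal:  2048 kB"; entries that do not
                # split into name/value/units are skipped by the ValueError handler.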
try:
name, value, units = line.split()
name = name.rstrip(':')
value = int(value)
if ((name not in _KEY_MAPPING and
'detailed' not in self.config)):
continue
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(value=value,
oldUnit=units,
newUnit=unit)
self.publish(name, value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
except ValueError:
continue
return True
else:
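            # /proc/meminfo is not readable (e.g. non-Linux); fall back to psutil.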
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No memory metrics retrieved')
return None
# psutil.phymem_usage() and psutil.virtmem_usage() are deprecated.
if hasattr(psutil, "phymem_usage"):
phymem_usage = psutil.phymem_usage()
virtmem_usage = psutil.virtmem_usage()
else:
phymem_usage = psutil.virtual_memory()
virtmem_usage = psutil.swap_memory()
units = 'B'
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(
value=phymem_usage.total, oldUnit=units, newUnit=unit)
self.publish('MemTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=phymem_usage.available, oldUnit=units, newUnit=unit)
self.publish('MemAvailable', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=phymem_usage.free, oldUnit=units, newUnit=unit)
self.publish('MemFree', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.total, oldUnit=units, newUnit=unit)
self.publish('SwapTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.free, oldUnit=units, newUnit=unit)
self.publish('SwapFree', value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
return True
return None
| mfriedenhagen/Diamond | src/collectors/memory/memory.py | Python | mit | 4,359 |
from . import Event, get_timestamp
from ..shared import console_repr
class Console(Event):
contains = ('line', 'time', 'user', 'source', 'kind', 'data', 'level')
requires = ('line',)
line = Event.Arg(required=True)
kind = Event.Arg()
time = Event.Arg()
user = Event.Arg(default='')
source = Event.Arg(default='mark2')
data = Event.Arg()
level = Event.Arg()
def setup(self):
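        # Fill in derived defaults: stamp the event time if none was given and
        # fall back to the raw line when no parsed data was supplied.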
if not self.time:
self.time = get_timestamp(self.time)
if not self.data:
self.data = self.line
def value(self):
return console_repr(self)
| frostyfrog/mark2 | mk2/events/console.py | Python | mit | 626 |
from __future__ import print_function
from .CoherentLaser import *
| acq4/acq4 | acq4/devices/CoherentLaser/__init__.py | Python | mit | 67 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import pytest
class TestCookies(object):
config = """
tasks:
test_cookies:
text:
url: http://httpbin.org/cookies
entry:
title: '\"title\": \"(.*)\"'
url: '\"url\": \"(.*)\"'
cookies: cookies.txt
"""
@pytest.mark.online()
def test_cookies(self, request, execute_task):
task = execute_task('test_cookies', options={'nocache': True})
assert task.find_entry(title='blah', url='aoeu'), 'Entry should have been created.'
| qvazzler/Flexget | tests/test_cookies.py | Python | mit | 695 |
import os
from django.core.urlresolvers import reverse_lazy
try:
import otp_yubikey
except ImportError:
otp_yubikey = None
BASE_DIR = os.path.dirname(__file__)
SECRET_KEY = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django_otp',
'django_otp.plugins.otp_static',
'django_otp.plugins.otp_totp',
'two_factor',
'tests',
]
if otp_yubikey:
INSTALLED_APPS += ['otp_yubikey']
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django_otp.middleware.OTPMiddleware',
'two_factor.middleware.threadlocals.ThreadLocals',
)
ROOT_URLCONF = 'tests.urls'
LOGOUT_URL = reverse_lazy('logout')
LOGIN_URL = reverse_lazy('two_factor:login')
LOGIN_REDIRECT_URL = reverse_lazy('two_factor:profile')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TWO_FACTOR_PATCH_ADMIN = False
AUTH_USER_MODEL = os.environ.get('AUTH_USER_MODEL', 'auth.User')
| mathspace/django-two-factor-auth | tests/settings.py | Python | mit | 1,499 |
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <[email protected]>
"""
from anitya.lib.backends import BaseBackend
from anitya.lib.exceptions import AnityaPluginException
class RubygemsBackend(BaseBackend):
''' The custom class for projects hosted on rubygems.org.
This backend allows to specify a version_url and a regex that will
be used to retrieve the version information.
'''
name = 'Rubygems'
examples = [
'http://rubygems.org/gems/aa',
'http://rubygems.org/gems/bio',
]
@classmethod
def get_version(cls, project):
''' Method called to retrieve the latest version of the projects
provided, project that relies on the backend of this plugin.
:arg Project project: a :class:`model.Project` object whose backend
corresponds to the current plugin.
:return: the latest version found upstream
:return type: str
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the version cannot be retrieved correctly
'''
return cls.get_ordered_versions(project)[-1]
@classmethod
def get_versions(cls, project):
''' Method called to retrieve all the versions (that can be found)
of the projects provided, project that relies on the backend of
this plugin.
:arg Project project: a :class:`model.Project` object whose backend
corresponds to the current plugin.
:return: a list of all the possible releases found
:return type: list
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the versions cannot be retrieved correctly
'''
url = 'http://rubygems.org/api/v1/versions/%(name)s/latest.json' % {
'name': project.name}
try:
req = cls.call_url(url)
except Exception: # pragma: no cover
raise AnityaPluginException('Could not contact %s' % url)
try:
data = req.json()
except Exception: # pragma: no cover
raise AnityaPluginException('No JSON returned by %s' % url)
if data['version'] == 'unknown':
raise AnityaPluginException(
'Project or version unknown at %s' % url)
return [data['version']]
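# Illustrative usage sketch (assumes an anitya Project row whose name is a gem,
# e.g. "bio"): RubygemsBackend.get_versions(project) fetches
# http://rubygems.org/api/v1/versions/<name>/latest.json and returns a one-element
# list with the latest version string; get_version(project) returns that string
# via get_ordered_versions().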
| Prashant-Surya/anitya | anitya/lib/backends/rubygems.py | Python | gpl-2.0 | 2,448 |
import logging
from autotest.client.shared import error
from virttest import qemu_monitor
def run(test, params, env):
"""
QMP Specification test-suite: this checks if the *basic* protocol conforms
to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree.
IMPORTANT NOTES:
o Most tests depend heavily on QMP's error information (eg. classes),
this might have bad implications as the error interface is going to
change in QMP
o Command testing is *not* covered in this suite. Each command has its
own specification and should be tested separately
o We use the same terminology as used by the QMP specification,
specially with regard to JSON types (eg. a Python dict is called
a json-object)
o This is divided in sub test-suites, please check the bottom of this
file to check the order in which they are run
TODO:
o Finding which test failed is not as easy as it should be
o Are all those check_*() functions really needed? Wouldn't a
specialized class (eg. a Response class) do better?
"""
def fail_no_key(qmp_dict, key):
if not isinstance(qmp_dict, dict):
raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
type(qmp_dict))
if key not in qmp_dict:
raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
(key, str(qmp_dict)))
def check_dict_key(qmp_dict, key, keytype):
"""
Performs the following checks on a QMP dict key:
1. qmp_dict is a dict
2. key exists in qmp_dict
3. key is of type keytype
If any of these checks fails, error.TestFail is raised.
"""
fail_no_key(qmp_dict, key)
if not isinstance(qmp_dict[key], keytype):
raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
(key, keytype, type(qmp_dict[key])))
def check_key_is_dict(qmp_dict, key):
check_dict_key(qmp_dict, key, dict)
def check_key_is_list(qmp_dict, key):
check_dict_key(qmp_dict, key, list)
def check_key_is_str(qmp_dict, key):
check_dict_key(qmp_dict, key, unicode)
def check_str_key(qmp_dict, keyname, value=None):
check_dict_key(qmp_dict, keyname, unicode)
if value and value != qmp_dict[keyname]:
raise error.TestFail("'%s' key value '%s' should be '%s'" %
(keyname, str(qmp_dict[keyname]), str(value)))
def check_key_is_int(qmp_dict, key):
fail_no_key(qmp_dict, key)
try:
int(qmp_dict[key])
except Exception:
raise error.TestFail("'%s' key is not of type int, it's '%s'" %
(key, type(qmp_dict[key])))
def check_bool_key(qmp_dict, keyname, value=None):
check_dict_key(qmp_dict, keyname, bool)
if value and value != qmp_dict[keyname]:
raise error.TestFail("'%s' key value '%s' should be '%s'" %
(keyname, str(qmp_dict[keyname]), str(value)))
def check_success_resp(resp, empty=False):
"""
Check QMP OK response.
:param resp: QMP response
:param empty: if True, response should not contain data to return
"""
check_key_is_dict(resp, "return")
if empty and len(resp["return"]) > 0:
raise error.TestFail("success response is not empty ('%s')" %
str(resp))
def check_error_resp(resp, classname=None, datadict=None):
"""
Check QMP error response.
:param resp: QMP response
:param classname: Expected error class name
:param datadict: Expected error data dictionary
"""
logging.debug("resp %s", str(resp))
check_key_is_dict(resp, "error")
check_key_is_str(resp["error"], "class")
if classname and resp["error"]["class"] != classname:
raise error.TestFail("got error class '%s' expected '%s'" %
(resp["error"]["class"], classname))
check_key_is_dict(resp["error"], "data")
if datadict and resp["error"]["data"] != datadict:
raise error.TestFail("got data dict '%s' expected '%s'" %
(resp["error"]["data"], datadict))
def test_version(version):
"""
Check the QMP greeting message version key which, according to QMP's
documentation, should be:
{ "qemu": { "major": json-int, "minor": json-int, "micro": json-int }
"package": json-string }
"""
check_key_is_dict(version, "qemu")
check_key_is_str(version, "package")
def test_greeting(greeting):
check_key_is_dict(greeting, "QMP")
check_key_is_dict(greeting["QMP"], "version")
check_key_is_list(greeting["QMP"], "capabilities")
def greeting_suite(monitor):
"""
Check the greeting message format, as described in the QMP
        specification section '2.2 Server Greeting'.
{ "QMP": { "version": json-object, "capabilities": json-array } }
"""
greeting = monitor.get_greeting()
test_greeting(greeting)
test_version(greeting["QMP"]["version"])
def json_parsing_errors_suite(monitor):
"""
Check that QMP's parser is able to recover from parsing errors, please
check the JSON spec for more info on the JSON syntax (RFC 4627).
"""
# We're quite simple right now and the focus is on parsing errors that
        # have already bitten us in the past.
#
# TODO: The following test-cases are missing:
#
# - JSON numbers, strings and arrays
# - More invalid characters or malformed structures
# - Valid, but not obvious syntax, like zillion of spaces or
# strings with unicode chars (different suite maybe?)
bad_json = []
# A JSON value MUST be an object, array, number, string, true, false,
# or null
#
# NOTE: QMP seems to ignore a number of chars, like: | and ?
bad_json.append(":")
bad_json.append(",")
# Malformed json-objects
#
# NOTE: sending only "}" seems to break QMP
# NOTE: Duplicate keys are accepted (should it?)
bad_json.append("{ \"execute\" }")
bad_json.append("{ \"execute\": \"query-version\", }")
bad_json.append("{ 1: \"query-version\" }")
bad_json.append("{ true: \"query-version\" }")
bad_json.append("{ []: \"query-version\" }")
bad_json.append("{ {}: \"query-version\" }")
for cmd in bad_json:
resp = monitor.cmd_raw(cmd)
check_error_resp(resp, "JSONParsing")
def test_id_key(monitor):
"""
Check that QMP's "id" key is correctly handled.
"""
# The "id" key must be echoed back in error responses
id_key = "virt-test"
resp = monitor.cmd_qmp("eject", {"foobar": True}, q_id=id_key)
check_error_resp(resp)
check_str_key(resp, "id", id_key)
# The "id" key must be echoed back in success responses
resp = monitor.cmd_qmp("query-status", q_id=id_key)
check_success_resp(resp)
check_str_key(resp, "id", id_key)
# The "id" key can be any json-object
for id_key in (True, 1234, "string again!", [1, [], {}, True, "foo"],
{"key": {}}):
resp = monitor.cmd_qmp("query-status", q_id=id_key)
check_success_resp(resp)
if resp["id"] != id_key:
raise error.TestFail("expected id '%s' but got '%s'" %
(str(id_key), str(resp["id"])))
def test_invalid_arg_key(monitor):
"""
Currently, the only supported keys in the input object are: "execute",
"arguments" and "id". Although expansion is supported, invalid key
names must be detected.
"""
resp = monitor.cmd_obj({"execute": "eject", "foobar": True})
expected_error = "QMPExtraInputObjectMember"
data_dict = {"member": "foobar"}
check_error_resp(resp, expected_error, data_dict)
def test_bad_arguments_key_type(monitor):
"""
The "arguments" key must be an json-object.
We use the eject command to perform the tests, but that's a random
choice, any command that accepts arguments will do, as the command
doesn't get called.
"""
for item in (True, [], 1, "foo"):
resp = monitor.cmd_obj({"execute": "eject", "arguments": item})
check_error_resp(resp, "QMPBadInputObjectMember",
{"member": "arguments", "expected": "object"})
def test_bad_execute_key_type(monitor):
"""
The "execute" key must be a json-string.
"""
for item in (False, 1, {}, []):
resp = monitor.cmd_obj({"execute": item})
check_error_resp(resp, "QMPBadInputObjectMember",
{"member": "execute", "expected": "string"})
def test_no_execute_key(monitor):
"""
The "execute" key must exist, we also test for some stupid parsing
errors.
"""
for cmd in ({}, {"execut": "qmp_capabilities"},
{"executee": "qmp_capabilities"}, {"foo": "bar"}):
resp = monitor.cmd_obj(cmd)
check_error_resp(resp) # XXX: check class and data dict?
def test_bad_input_obj_type(monitor):
"""
The input object must be... an json-object.
"""
for cmd in ("foo", [], True, 1):
resp = monitor.cmd_obj(cmd)
check_error_resp(resp, "QMPBadInputObject", {"expected": "object"})
def test_good_input_obj(monitor):
"""
Basic success tests for issuing QMP commands.
"""
# NOTE: We don't use the cmd_qmp() method here because the command
# object is in a 'random' order
resp = monitor.cmd_obj({"execute": "query-version"})
check_success_resp(resp)
resp = monitor.cmd_obj({"arguments": {}, "execute": "query-version"})
check_success_resp(resp)
id_key = "1234foo"
resp = monitor.cmd_obj({"id": id_key, "execute": "query-version",
"arguments": {}})
check_success_resp(resp)
check_str_key(resp, "id", id_key)
# TODO: would be good to test simple argument usage, but we don't have
# a read-only command that accepts arguments.
def input_object_suite(monitor):
"""
        Check the input object format, as described in the QMP specification
section '2.3 Issuing Commands'.
{ "execute": json-string, "arguments": json-object, "id": json-value }
"""
test_good_input_obj(monitor)
test_bad_input_obj_type(monitor)
test_no_execute_key(monitor)
test_bad_execute_key_type(monitor)
test_bad_arguments_key_type(monitor)
test_id_key(monitor)
test_invalid_arg_key(monitor)
def argument_checker_suite(monitor):
"""
Check that QMP's argument checker is detecting all possible errors.
We use a number of different commands to perform the checks, but the
command used doesn't matter much as QMP performs argument checking
_before_ calling the command.
"""
# qmp in RHEL6 is different from 0.13.*:
        # 1. the 'stop' command just returns {} even if it is given arguments.
        # 2. there is no 'screendump' command.
        # 3. arguments aren't checked in the 'device' command.
# so skip these tests in RHEL6.
# test optional argument: 'force' is omitted, but it's optional, so
# the handler has to be called. Test this happens by checking an
# error that is generated by the handler itself.
resp = monitor.cmd_qmp("eject", {"device": "foobar"})
check_error_resp(resp, "DeviceNotFound")
# val argument must be a json-int
for arg in ({}, [], True, "foo"):
resp = monitor.cmd_qmp("memsave", {"val": arg, "filename": "foo",
"size": 10})
check_error_resp(resp, "InvalidParameterType",
{"name": "val", "expected": "int"})
# value argument must be a json-number
for arg in ({}, [], True, "foo"):
resp = monitor.cmd_qmp("migrate_set_speed", {"value": arg})
check_error_resp(resp, "InvalidParameterType",
{"name": "value", "expected": "number"})
# qdev-type commands have their own argument checker, all QMP does
# is to skip its checking and pass arguments through. Check this
# works by providing invalid options to device_add and expecting
# an error message from qdev
resp = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"})
check_error_resp(resp, "PropertyNotFound",
{"device": "e1000", "property": "foo"})
def unknown_commands_suite(monitor):
"""
Check that QMP handles unknown commands correctly.
"""
# We also call a HMP-only command, to be sure it will fail as expected
for cmd in ("bar", "query-", "query-foo", "help"):
resp = monitor.cmd_qmp(cmd)
check_error_resp(resp, "CommandNotFound", {"name": cmd})
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
# Look for the first qmp monitor available, otherwise, fail the test
qmp_monitor = vm.get_monitors_by_type("qmp")
if qmp_monitor:
qmp_monitor = qmp_monitor[0]
else:
raise error.TestError('Could not find a QMP monitor, aborting test')
# Run all suites
greeting_suite(qmp_monitor)
input_object_suite(qmp_monitor)
argument_checker_suite(qmp_monitor)
unknown_commands_suite(qmp_monitor)
json_parsing_errors_suite(qmp_monitor)
# check if QMP is still alive
if not qmp_monitor.is_responsive():
raise error.TestFail('QMP monitor is not responsive after testing')
| uni-peter-zheng/tp-qemu | qemu/tests/qmp_basic_rhel6.py | Python | gpl-2.0 | 14,360 |
#!/usr/bin/env python
# encoding: utf-8
# Convert help for ginsh functions from man page to C source
import sys, re, optparse
rxStart = re.compile('^.*GINSH_FCN_HELP_START$')
rxEnd = re.compile('^.*GINSH_FCN_HELP_END$')
fcnRx = re.compile('^[.]BI\s+')
hlpRx = re.compile('\\\\[-]')
codeFmt = 'insert_help("%s",\n"%s"\n);\n'
def parseProto(pStr):
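    # Strip the man-page ".BI" markup, whitespace and quotes from a prototype line,
    # then return the bare function name together with the cleaned-up prototype.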
pStr = fcnRx.sub('', pStr)
pStr = re.sub('\s', '', pStr)
pStr = re.sub('"', '', pStr)
pStr = re.sub(',', ', ', pStr)
pStr = re.sub('\\[', ' [', pStr)
name = pStr.split('(')[0]
return name, pStr
def extractHelp(inp, out):
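    # Walk the man page between the GINSH_FCN_HELP_START/END markers, pairing each
    # ".BI" prototype with the "\-" help text that follows it, and emit one
    # insert_help(...) C call per function when the terminating ".br" is reached.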
name, proto, synopsis = None, None, None
seenStart = False
for l in inp:
l = l.strip()
if not seenStart:
if rxStart.match(l):
seenStart = True
continue
if rxEnd.match(l):
break
if fcnRx.match(l):
name, proto = parseProto(l)
elif hlpRx.match(l):
l = hlpRx.sub('', l).strip()
l = re.sub('"', "'", l)
synopsis = '%s"\n" - %s' % ( proto, l )
elif l.lower() == '.br':
synopsis = synopsis or proto
out.write(codeFmt % ( name, synopsis ))
name, proto, synopsis = None, None, None
def main():
op = optparse.OptionParser()
op.add_option('-o', dest = 'outfile')
options, args = op.parse_args()
outfile = sys.stdout
infile = sys.stdin
if not options.outfile is None:
outfile = open(options.outfile, 'wt')
if len(args) >= 1:
infile = open(args[0])
extractHelp(infile, outfile)
if infile != sys.stdin:
infile.close()
outfile.flush()
if outfile != sys.stdout:
outfile.close()
if __name__ == '__main__':
main()
sys.exit(0)
| ARudik/feelpp.ginac | ginsh/ginsh_fcn_help.py | Python | gpl-2.0 | 1,558 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class tcsh(test.test):
"""
Autotest module for testing basic functionality
of tcsh
@author Robert Paulsen, [email protected]
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
        logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
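            # Point LTPBIN at the shared helpers, then run the packaged tcsh.sh
            # test script; any non-zero exit status is counted as a failure.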
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.call(test_path + '/tcsh' + '/tcsh.sh', shell=True)
if ret_val != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| PoornimaNayak/autotest-client-tests | linux-tools/tcsh/tcsh.py | Python | gpl-2.0 | 1,158 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_support,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_affinity_groups
short_description: Module to manage affinity groups in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage affinity groups in oVirt. It can also manage assignments
of those groups to VMs."
options:
name:
description:
- "Name of the the affinity group to manage."
required: true
state:
description:
- "Should the affinity group be present or absent."
choices: ['present', 'absent']
default: present
cluster:
description:
- "Name of the cluster of the affinity group."
description:
description:
- "Description of the affinity group."
host_enforcing:
description:
- "If I(true) VM cannot start on host if it does not satisfy the C(host_rule)."
- "C(This parameter is support since oVirt 4.1 version.)"
host_rule:
description:
- "If I(positive) I(all) VMs in this group should run on the this host."
- "If I(negative) I(no) VMs in this group should run on the this host."
- "C(This parameter is support since oVirt 4.1 version.)"
choices:
- positive
- negative
vm_enforcing:
description:
- "If I(true) VM cannot start if it does not satisfy the C(vm_rule)."
vm_rule:
description:
- "If I(positive) I(all) VMs in this group should run on the host defined by C(host_rule)."
- "If I(negative) I(no) VMs in this group should run on the host defined by C(host_rule)."
- "If I(disabled) this affinity group doesn't take effect."
choices:
- positive
- negative
- disabled
vms:
description:
- "List of the VMs names, which should have assigned this affinity group."
hosts:
description:
- "List of the hosts names, which should have assigned this affinity group."
- "C(This parameter is support since oVirt 4.1 version.)"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign affinity group to VMs vm1 and vm2 and host host1
- ovirt_affinity_groups:
name: mygroup
cluster: mycluster
vm_enforcing: true
vm_rule: positive
host_enforcing: true
host_rule: positive
vms:
- vm1
- vm2
hosts:
- host1
# Detach VMs from affinity group and disable VM rule:
- ovirt_affinity_groups:
name: mygroup
cluster: mycluster
vm_enforcing: false
vm_rule: disabled
host_enforcing: true
host_rule: positive
vms: []
hosts:
- host1
- host2
# Remove affinity group
- ovirt_affinity_groups:
state: absent
cluster: mycluster
name: mygroup
'''
RETURN = '''
id:
description: ID of the affinity group which is managed
returned: On success if affinity group is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_group:
description: "Dictionary of all the affinity group attributes. Affinity group attributes can be found on your oVirt instance
                  at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/affinity_group."
returned: On success if affinity group is found.
'''
class AffinityGroupsModule(BaseModule):
def __init__(self, vm_ids, host_ids, *args, **kwargs):
super(AffinityGroupsModule, self).__init__(*args, **kwargs)
self._vm_ids = vm_ids
self._host_ids = host_ids
def build_entity(self):
return otypes.AffinityGroup(
name=self._module.params['name'],
description=self._module.params['description'],
positive=(
self._module.params['vm_rule'] == 'positive'
) if self._module.params['vm_rule'] is not None else None,
enforcing=(
self._module.params['vm_enforcing']
) if self._module.params['vm_enforcing'] is not None else None,
vms=[
otypes.Vm(id=vm_id) for vm_id in self._vm_ids
] if self._vm_ids is not None else None,
hosts=[
otypes.Host(id=host_id) for host_id in self._host_ids
] if self._host_ids is not None else None,
vms_rule=otypes.AffinityRule(
positive=(
self._module.params['vm_rule'] == 'positive'
) if self._module.params['vm_rule'] is not None else None,
enforcing=self._module.params['vm_enforcing'],
enabled=(
self._module.params['vm_rule'] in ['negative', 'positive']
) if self._module.params['vm_rule'] is not None else None,
) if (
self._module.params['vm_enforcing'] is not None or
self._module.params['vm_rule'] is not None
) else None,
hosts_rule=otypes.AffinityRule(
positive=(
self._module.params['host_rule'] == 'positive'
) if self._module.params['host_rule'] is not None else None,
enforcing=self._module.params['host_enforcing'],
) if (
self._module.params['host_enforcing'] is not None or
self._module.params['host_rule'] is not None
) else None,
)
def update_check(self, entity):
assigned_vms = sorted([vm.id for vm in entity.vms])
assigned_hosts = sorted([host.id for host in entity.hosts])
return (
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('vm_enforcing'), entity.vms_rule.enforcing) and
equal(self._module.params.get('host_enforcing'), entity.hosts_rule.enforcing) and
equal(self._module.params.get('vm_rule') == 'positive', entity.vms_rule.positive) and
equal(self._module.params.get('vm_rule') in ['negative', 'positive'], entity.vms_rule.enabled) and
equal(self._module.params.get('host_rule') == 'positive', entity.hosts_rule.positive) and
equal(self._vm_ids, assigned_vms) and
equal(self._host_ids, assigned_hosts)
)
def _get_obj_id(obj_service, obj_name):
obj = search_by_name(obj_service, obj_name)
if obj is None:
raise Exception("Object '%s' was not found." % obj_name)
return obj.id
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
cluster=dict(default=None, required=True),
name=dict(default=None, required=True),
description=dict(default=None),
vm_enforcing=dict(default=None, type='bool'),
vm_rule=dict(default=None, choices=['positive', 'negative', 'disabled']),
host_enforcing=dict(default=None, type='bool'),
host_rule=dict(default=None, choices=['positive', 'negative']),
vms=dict(default=None, type='list'),
hosts=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
connection = create_connection(module.params.pop('auth'))
# Check if unsupported parameters were passed:
supported_41 = ('host_enforcing', 'host_rule', 'hosts')
if not check_support(
version='4.1',
connection=connection,
module=module,
params=supported_41,
):
module.fail_json(
            msg='The following parameters require oVirt 4.1 or above: {params}'.format(
params=supported_41,
)
)
try:
clusters_service = connection.system_service().clusters_service()
vms_service = connection.system_service().vms_service()
hosts_service = connection.system_service().hosts_service()
cluster_name = module.params['cluster']
cluster = search_by_name(clusters_service, cluster_name)
if cluster is None:
raise Exception("Cluster '%s' was not found." % cluster_name)
cluster_service = clusters_service.cluster_service(cluster.id)
affinity_groups_service = cluster_service.affinity_groups_service()
# Fetch VM ids which should be assigned to affinity group:
vm_ids = sorted([
_get_obj_id(vms_service, vm_name)
for vm_name in module.params['vms']
]) if module.params['vms'] is not None else None
# Fetch host ids which should be assigned to affinity group:
host_ids = sorted([
_get_obj_id(hosts_service, host_name)
for host_name in module.params['hosts']
]) if module.params['hosts'] is not None else None
affinity_groups_module = AffinityGroupsModule(
connection=connection,
module=module,
service=affinity_groups_service,
vm_ids=vm_ids,
host_ids=host_ids,
)
state = module.params['state']
if state == 'present':
ret = affinity_groups_module.create()
elif state == 'absent':
ret = affinity_groups_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == "__main__":
main()
| jcftang/ansible | lib/ansible/modules/cloud/ovirt/ovirt_affinity_groups.py | Python | gpl-3.0 | 10,833 |
from xml.dom.minidom import parseString
tracker_N={}
tracker_S={}
file = open('audiogames.xml','r')
data = file.read()
file.close()
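# Expected layout of audiogames.xml, inferred from the tag names read below
# (the enclosing element names and nesting are an assumption):
#
#   <audiogames>
#     <language>...</language>
#     <number>...</number>
#     <ajuste>...</ajuste>
#     <tracker><Name>Norte</Name><ip>...</ip><puerto>...</puerto></tracker>
#     <tracker><Name>Sur</Name><ip>...</ip><puerto>...</puerto></tracker>
#   </audiogames>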
dom = parseString(data)
lang = dom.getElementsByTagName('language')[0].toxml()
kin= dom.getElementsByTagName('number')[0].toxml()
kinects=kin.replace('<number>','').replace('</number>','')
language=lang.replace('<language>','').replace('</language>','')
Name= dom.getElementsByTagName('Name')
ips= dom.getElementsByTagName('ip')
aj= dom.getElementsByTagName('ajuste')[0].toxml()
ports= dom.getElementsByTagName('puerto')
ori=aj.replace('<ajuste>','').replace('</ajuste>','')
for n in Name:
em= n.toxml().replace('<Name>','').replace('</Name>','')
if em == "Norte":
tracker_N['ip']=str(ips[0].toxml().replace('<ip>','').replace('</ip>',''))
tracker_N['port']=str(ports[0].toxml().replace('<puerto>','').replace('</puerto>', ''))
if em == "Sur":
tracker_S['ip']=str(ips[1].toxml().replace('<ip>','').replace('</ip>',''))
tracker_S['port']=str(ports[1].toxml().replace('<puerto>','').replace('</puerto>', ''))
print language, kinects, ori, tracker_N, tracker_S
| husk00/audiogames | argentina/readconf.py | Python | gpl-3.0 | 1,107 |
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which"]
# disk_usage is added later, if available on the platform
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registery operation with the archiving
and unpacking registeries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
for name in os.listxattr(src, follow_symlinks=follow_symlinks):
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
        # if we got a NotImplementedError, it's because
        #   * follow_symlinks=False,
        #   * lchmod() is unavailable, and
        #   * either
        #       * fchmodat() is unavailable or
        #       * fchmodat() doesn't implement AT_SYMLINK_NOFOLLOW.
        #         (it returned ENOTSUP.)
        # therefore we're out of options--we simply cannot chmod the
        # symlink.  give up, suppress the error.
        # (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
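# Illustrative sketch (not executed; the paths are hypothetical): copy2()
# preserves timestamps and other stat info, while copy() preserves only mode
# bits.
#
#   copy2('settings.ini', '/etc/myapp/')            # copy into a directory
#   copy2('settings.ini', '/etc/myapp/backup.ini')  # copy to an explicit name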
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
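# Illustrative sketch (not executed; the paths and patterns are hypothetical):
# the callable returned by ignore_patterns() is passed to copytree() below and
# is invoked once per visited directory, returning the names to skip.
#
#   copytree('/srv/project', '/backup/project',
#            ignore=ignore_patterns('*.pyc', 'tmp*'))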
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
                    # otherwise let the copy occur. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except os.error:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except os.error:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except os.error:
onerror(os.rmdir, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except os.error:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
            onerror(os.open, path, sys.exc_info())
return
try:
if (stat.S_ISDIR(orig_st.st_mode) and
os.path.samestat(orig_st, os.fstat(fd))):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
else:
raise NotADirectoryError(20,
"Not a directory: '{}'".format(path))
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
return real_dst
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
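# Illustrative sketch (not executed; the paths are hypothetical): create a
# gzipped tarball of everything under '/srv/data'.
#
#   archive = make_archive('/tmp/data-backup', 'gztar', root_dir='/srv/data')
#   # archive == '/tmp/data-backup.tar.gz'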
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
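# Illustrative sketch (not executed; the '.foo' format and its unpacker are
# hypothetical): the registered callable receives the archive filename, the
# target directory and any extra_args, and must raise ReadError when it
# cannot handle the file.
#
#   def _unpack_foo(filename, extract_dir):
#       ...
#
#   register_unpack_format('foo', ['.foo'], _unpack_foo,
#                          description='foo archive')
#   unpack_archive('bundle.foo', '/tmp/out')   # dispatched by file extension
#   unregister_unpack_format('foo')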
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
        Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
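# Illustrative sketch (not executed; the path and account names are
# hypothetical): user/group names and numeric ids are both accepted.
#
#   chown('/srv/www', user='www-data', group='www-data')
#   chown('/srv/www', user=33)   # group left unchanged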
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
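# Illustrative sketch (not executed):
#
#   columns, lines = get_terminal_size()
#   size = get_terminal_size(fallback=(132, 43))   # custom fallback size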
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# Short circuit. If we're given a full path which matches the mode
# and it exists, we're done here.
if _access_check(cmd, mode):
return cmd
path = (path or os.environ.get("PATH", os.defpath)).split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]
# If it does match, only test that one, otherwise we have to try
# others.
files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
dir = os.path.normcase(dir)
if not dir in seen:
seen.add(dir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
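# Illustrative sketch (not executed; the command names and search path are
# hypothetical):
#
#   which('python3')                         # e.g. '/usr/bin/python3' or None
#   which('mytool', path='/opt/tools/bin')   # restrict the search path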
| lfcnassif/MultiContentViewer | release/modules/ext/libreoffice/program/python-core-3.3.0/lib/shutil.py | Python | lgpl-3.0 | 38,079 |
# -*- coding: utf-8 -*-
import logging
import collections
logger = logging.getLogger(__name__)
class WriteAction(object):
def __init__(self, method, *args, **kwargs):
if not callable(method):
raise ValueError('Argument `method` must be callable')
self.method = method
# Note: `args` and `kwargs` must not be mutated after an action is
# enqueued and before it is committed, else awful things can happen
self.args = args
self.kwargs = kwargs
def execute(self):
return self.method(*self.args, **self.kwargs)
def __repr__(self):
return '{0}(*{1}, **{2})'.format(
self.method.__name__,
self.args,
self.kwargs
)
class WriteQueue(object):
def __init__(self):
self.active = False
self.actions = collections.deque()
def start(self):
if self.active:
logger.warn('Already working in a write queue. Further writes '
'will be appended to the current queue.')
self.active = True
def push(self, action):
if not self.active:
raise ValueError('Cannot push unless queue is active')
if not isinstance(action, WriteAction):
raise TypeError('Argument `action` must be instance '
'of `WriteAction`')
self.actions.append(action)
def commit(self):
if not self.active:
raise ValueError('Cannot commit unless queue is active')
results = []
while self.actions:
action = self.actions.popleft()
results.append(action.execute())
return results
def clear(self):
self.active = False
self.actions = collections.deque()
def __nonzero__(self):
return bool(self.actions)
# Python 3
__bool__ = __nonzero__
class QueueContext(object):
def __init__(self, BaseSchema):
self.BaseSchema = BaseSchema
def __enter__(self):
self.BaseSchema.start_queue()
def __exit__(self, exc_type, exc_val, exc_tb):
self.BaseSchema.commit_queue()
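# Illustrative usage sketch (not executed; `save_record` and its argument are
# hypothetical):
#
#   queue = WriteQueue()
#   queue.start()
#   queue.push(WriteAction(save_record, record_id=42))
#   results = queue.commit()   # executes queued actions in FIFO order
#   queue.clear()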
| sloria/modular-odm | modularodm/writequeue.py | Python | apache-2.0 | 2,146 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.athena import AWSAthenaHook
from airflow.providers.amazon.aws.sensors.athena import AthenaSensor
class TestAthenaSensor(unittest.TestCase):
def setUp(self):
self.sensor = AthenaSensor(
task_id='test_athena_sensor',
query_execution_id='abc',
sleep_time=5,
max_retries=1,
aws_conn_id='aws_default',
)
@mock.patch.object(AWSAthenaHook, 'poll_query_status', side_effect=("SUCCEEDED",))
def test_poke_success(self, mock_poll_query_status):
assert self.sensor.poke(None)
@mock.patch.object(AWSAthenaHook, 'poll_query_status', side_effect=("RUNNING",))
def test_poke_running(self, mock_poll_query_status):
assert not self.sensor.poke(None)
@mock.patch.object(AWSAthenaHook, 'poll_query_status', side_effect=("QUEUED",))
def test_poke_queued(self, mock_poll_query_status):
assert not self.sensor.poke(None)
@mock.patch.object(AWSAthenaHook, 'poll_query_status', side_effect=("FAILED",))
def test_poke_failed(self, mock_poll_query_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert 'Athena sensor failed' in str(ctx.value)
@mock.patch.object(AWSAthenaHook, 'poll_query_status', side_effect=("CANCELLED",))
def test_poke_cancelled(self, mock_poll_query_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert 'Athena sensor failed' in str(ctx.value)
| apache/incubator-airflow | tests/providers/amazon/aws/sensors/test_athena.py | Python | apache-2.0 | 2,442 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import time
import uuid
from azure.storage import (
Metrics,
CorsRule,
)
from azure.common import (
AzureConflictHttpError,
AzureMissingResourceHttpError,
)
class ShareSamples():
def __init__(self, account):
self.account = account
def run_all_samples(self):
self.service = self.account.create_file_service()
self.create_share()
self.delete_share()
self.share_metadata()
self.share_properties()
self.share_stats()
self.share_exists()
self.list_shares()
# This method contains sleeps, so don't run by default
# self.service_properties()
def _get_share_reference(self, prefix='share'):
return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
def _create_share(self, prefix='share'):
share_name = self._get_share_reference(prefix)
self.service.create_share(share_name)
return share_name
def create_share(self):
# Basic
share_name1 = self._get_share_reference()
created = self.service.create_share(share_name1) # True
# Quota
share_name2 = self._get_share_reference()
created = self.service.create_share(share_name2, quota=1) # True
# Metadata
metadata = {'val1': 'foo', 'val2': 'blah'}
share_name3 = self._get_share_reference()
created = self.service.create_share(share_name3, metadata=metadata) # True
# Fail on exist
share_name4 = self._get_share_reference()
created = self.service.create_share(share_name4) # True
created = self.service.create_share(share_name4) # False
try:
self.service.create_share(share_name4, fail_on_exist=True)
except AzureConflictHttpError:
pass
self.service.delete_share(share_name1)
self.service.delete_share(share_name2)
self.service.delete_share(share_name3)
self.service.delete_share(share_name4)
def delete_share(self):
# Basic
share_name = self._create_share()
deleted = self.service.delete_share(share_name) # True
# Fail not exist
share_name = self._get_share_reference()
deleted = self.service.delete_share(share_name) # False
try:
self.service.delete_share(share_name, fail_not_exist=True)
except AzureMissingResourceHttpError:
pass
def share_metadata(self):
share_name = self._create_share()
metadata = {'val1': 'foo', 'val2': 'blah'}
# Basic
self.service.set_share_metadata(share_name, metadata=metadata)
metadata = self.service.get_share_metadata(share_name) # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
metadata = {'new': 'val'}
self.service.set_share_metadata(share_name, metadata=metadata)
metadata = self.service.get_share_metadata(share_name) # metadata={'new': 'val'}
# Capital letters
metadata = {'NEW': 'VAL'}
self.service.set_share_metadata(share_name, metadata=metadata)
metadata = self.service.get_share_metadata(share_name) # metadata={'new': 'VAL'}
# Clearing
self.service.set_share_metadata(share_name)
metadata = self.service.get_share_metadata(share_name) # metadata={}
self.service.delete_share(share_name)
def share_properties(self):
share_name = self._create_share()
metadata = {'val1': 'foo', 'val2': 'blah'}
# Basic
# Sets the share quota to 1 GB
self.service.set_share_properties(share_name, 1)
share = self.service.get_share_properties(share_name)
quota = share.properties.quota # 1
# Metadata
self.service.set_share_metadata(share_name, metadata=metadata)
share = self.service.get_share_properties(share_name)
metadata = share.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
self.service.delete_share(share_name)
def share_stats(self):
share_name = self._create_share()
self.service.create_file_from_text(share_name, None, 'file1', b'hello world')
# Basic
share_usage = self.service.get_share_stats(share_name) # 1
self.service.delete_share(share_name)
def share_exists(self):
share_name = self._get_share_reference()
# Basic
exists = self.service.exists(share_name) # False
self.service.create_share(share_name)
exists = self.service.exists(share_name) # True
self.service.delete_share(share_name)
def list_shares(self):
        share_name1 = 'share1'
        self.service.create_share(share_name1, metadata={'val1': 'foo', 'val2': 'blah'})
share_name2 = self._create_share('share2')
share_name3 = self._create_share('thirdshare')
# Basic
# Commented out as this will list every share in your account
# shares = list(self.service.list_shares())
# for share in shares:
# print(share.name) # share1, share2, thirdq, all other shares created in the service
# Num results
# Will return in alphabetical order.
shares = list(self.service.list_shares(num_results=2))
for share in shares:
print(share.name) # share1, share2
# Prefix
shares = list(self.service.list_shares(prefix='share'))
for share in shares:
print(share.name) # share1, share2
# Metadata
shares = list(self.service.list_shares(prefix='share', include_metadata=True))
share = next((q for q in shares if q.name == 'share1'), None)
metadata = share.metadata # {'val1': 'foo', 'val2': 'blah'}
self.service.delete_share(share_name1)
self.service.delete_share(share_name2)
self.service.delete_share(share_name3)
def service_properties(self):
# Basic
self.service.set_file_service_properties(hour_metrics=Metrics(enabled=True, include_apis=True),
minute_metrics=Metrics(enabled=True, include_apis=False),
cors=[CorsRule(allowed_origins=['*'], allowed_methods=['GET'])])
# Wait 30 seconds for settings to propagate
time.sleep(30)
props = self.service.get_file_service_properties() # props = ServiceProperties() w/ all properties specified above
# Omitted properties will not overwrite what's already on the self.service
# Empty properties will clear
self.service.set_file_service_properties(cors=[])
# Wait 30 seconds for settings to propagate
time.sleep(30)
props = self.service.get_file_service_properties() # props = ServiceProperties() w/ CORS rules cleared
| jehine-MSFT/azure-storage-python | samples/file/share_usage.py | Python | apache-2.0 | 7,607 |
from rest_framework.exceptions import APIException
from rest_framework.exceptions import PermissionDenied as RestFrameworkPermissionDenied
class XOSProgrammingError(APIException):
status_code=400
def __init__(self, why="programming error", fields={}):
APIException.__init__(self, {"error": "XOSProgrammingError",
"specific_error": why,
"fields": fields})
class XOSPermissionDenied(RestFrameworkPermissionDenied):
def __init__(self, why="permission error", fields={}):
APIException.__init__(self, {"error": "XOSPermissionDenied",
"specific_error": why,
"fields": fields})
class XOSNotAuthenticated(RestFrameworkPermissionDenied):
def __init__(self, why="you must be authenticated to use this api", fields={}):
APIException.__init__(self, {"error": "XOSNotAuthenticated",
"specific_error": why,
"fields": fields})
class XOSNotFound(RestFrameworkPermissionDenied):
status_code=404
def __init__(self, why="object not found", fields={}):
APIException.__init__(self, {"error": "XOSNotFound",
"specific_error": why,
"fields": fields})
class XOSValidationError(APIException):
status_code=403
def __init__(self, why="validation error", fields={}):
APIException.__init__(self, {"error": "XOSValidationError",
"specific_error": why,
"fields": fields})
class XOSDuplicateKey(APIException):
status_code=400
def __init__(self, why="duplicate key", fields={}):
APIException.__init__(self, {"error": "XOSDuplicateKey",
"specific_error": why,
"fields": fields})
class XOSMissingField(APIException):
status_code=400
def __init__(self, why="missing field", fields={}):
APIException.__init__(self, {"error": "XOSMissingField",
"specific_error": why,
"fields": fields})
class XOSConfigurationError(APIException):
status_code=400
def __init__(self, why="configuration error", fields={}):
APIException.__init__(self, {"error": "XOSConfigurationError",
"specific_error": why,
"fields": fields})
class XOSConflictingField(APIException):
status_code=400
def __init__(self, why="conflicting field", fields={}):
APIException.__init__(self, {"error": "XOSMissingField",
"specific_error": why,
"fields": fields})
class XOSServiceUnavailable(APIException):
status_code=503
def __init__(self, why="Service temporarily unavailable, try again later", fields={}):
APIException.__init__(self, {"error": "XOSServiceUnavailable",
"specific_error": why,
"fields": fields})
| xmaruto/mcord | xos/xos/exceptions.py | Python | apache-2.0 | 3,056 |
"""
Compare Image Tests
-------------------
This script compares all the mis-matching images found when running
$ nosetests astroML_fig_tests
The result of running this script is an html page comparing each output file
to the baseline result, showing only the ones with a mismatch above the
threshold specified in astroML_fig_tests.
"""
import os
TABLE = """
<html>
<table>
{rows}
</table>
</html>
"""
ROW = """
<tr>
<td align="center">{0}</td>
<td align="center">actual</td>
<td align="center">baseline</td>
</tr>
<tr>
<td><img src="{1}" width="100%"></td>
<td><img src="{2}" width="100%"></td>
<td><img src="{3}" width="100%"></td>
</tr>
"""
baseline = "astroML_fig_tests/baseline/book_figures"
results = "astroML_fig_tests/results/book_figures"
figlist = []
for chapter in os.listdir(results):
if not os.path.isdir(os.path.join(results,chapter)):
continue
for pyfile in os.listdir(os.path.join(results,chapter)):
if pyfile.endswith('failed-diff.png'):
root = pyfile.split('-failed-diff')[0]
figlist.append((os.path.join("book_figures", chapter, root + ".py"),
os.path.join(results, chapter, pyfile),
os.path.join(results, chapter, root + '.png'),
os.path.join(baseline, chapter, root + '.png')))
outfile = "_compare_images.html"
with open(outfile, 'w') as f:
f.write(TABLE.format(rows = '\n'.join([ROW.format(*figs, width="90%")
for figs in figlist])))
import webbrowser
webbrowser.open_new("file://localhost" + os.path.abspath(outfile))
| nhuntwalker/astroML | compare_images.py | Python | bsd-2-clause | 1,676 |
"""Simple validation of specifications passed to slybot"""
from os.path import dirname, join
import json
import rfc3987
from urlparse import urlparse, parse_qsl
from urllib import urlencode
from urllib2 import unquote
from jsonschema import Draft3Validator, RefResolver, FormatChecker
_PATH = dirname(__file__)
def load_schemas():
filename = join(_PATH, "schemas.json")
return dict((s["id"], s) for s in json.load(open(filename)))
_SCHEMAS = load_schemas()
class SlybotJsonSchemaValidator(Draft3Validator):
DEFAULT_TYPES = Draft3Validator.DEFAULT_TYPES.copy()
DEFAULT_TYPES.update({
"mapping": dict,
})
def get_schema_validator(schema):
resolver = RefResolver("", schema, _SCHEMAS)
@FormatChecker.cls_checks('url', (ValueError,))
def is_valid_uri(instance):
if not isinstance(instance, basestring):
return True
uri = urlparse(instance)
query = urlencode(parse_qsl(unquote(uri.query.encode('utf-8'))))
return rfc3987.parse(uri._replace(query=query).geturl(),
rule='URI')
return SlybotJsonSchemaValidator(_SCHEMAS[schema], resolver=resolver,
format_checker=FormatChecker())
def validate_project_schema(specs):
project = specs["project"]
get_schema_validator("project").validate(project)
items = specs["items"]
get_schema_validator("items").validate(items)
extractors = specs["extractors"]
get_schema_validator("extractors").validate(extractors)
spider_schema_validator = get_schema_validator("spider")
for spider in specs["spiders"].values():
spider_schema_validator.validate(spider)
return True
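# Illustrative usage sketch (the JSON file names below are an assumption about
# a typical slybot project layout):
#
#   import json
#   specs = {
#       "project": json.load(open("project.json")),
#       "items": json.load(open("items.json")),
#       "extractors": json.load(open("extractors.json")),
#       "spiders": {"spider1": json.load(open("spiders/spider1.json"))},
#   }
#   validate_project_schema(specs)   # True, or raises a jsonschema ValidationError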
| flip111/portia | slybot/slybot/validation/schema.py | Python | bsd-3-clause | 1,712 |
from __future__ import division, absolute_import, print_function
import sys
import time
from datetime import date
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
from numpy.compat import unicode
class TestLineSplitter(object):
"Tests the LineSplitter class."
def test_no_delimiter(self):
"Test LineSplitter w/o delimiter"
strg = " 1 2 3 4 5 # test"
test = LineSplitter()(strg)
assert_equal(test, ['1', '2', '3', '4', '5'])
test = LineSplitter('')(strg)
assert_equal(test, ['1', '2', '3', '4', '5'])
def test_space_delimiter(self):
"Test space delimiter"
strg = " 1 2 3 4 5 # test"
test = LineSplitter(' ')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
test = LineSplitter(' ')(strg)
assert_equal(test, ['1 2 3 4', '5'])
def test_tab_delimiter(self):
"Test tab delimiter"
strg = " 1\t 2\t 3\t 4\t 5 6"
test = LineSplitter('\t')(strg)
assert_equal(test, ['1', '2', '3', '4', '5 6'])
strg = " 1 2\t 3 4\t 5 6"
test = LineSplitter('\t')(strg)
assert_equal(test, ['1 2', '3 4', '5 6'])
def test_other_delimiter(self):
"Test LineSplitter on delimiter"
strg = "1,2,3,4,,5"
test = LineSplitter(',')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
#
strg = " 1,2,3,4,,5 # test"
test = LineSplitter(',')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
def test_constant_fixed_width(self):
"Test LineSplitter w/ fixed-width fields"
strg = " 1 2 3 4 5 # test"
test = LineSplitter(3)(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
#
strg = " 1 3 4 5 6# test"
test = LineSplitter(20)(strg)
assert_equal(test, ['1 3 4 5 6'])
#
strg = " 1 3 4 5 6# test"
test = LineSplitter(30)(strg)
assert_equal(test, ['1 3 4 5 6'])
def test_variable_fixed_width(self):
strg = " 1 3 4 5 6# test"
test = LineSplitter((3, 6, 6, 3))(strg)
assert_equal(test, ['1', '3', '4 5', '6'])
#
strg = " 1 3 4 5 6# test"
test = LineSplitter((6, 6, 9))(strg)
assert_equal(test, ['1', '3 4', '5 6'])
# -----------------------------------------------------------------------------
class TestNameValidator(object):
def test_case_sensitivity(self):
"Test case sensitivity"
names = ['A', 'a', 'b', 'c']
test = NameValidator().validate(names)
assert_equal(test, ['A', 'a', 'b', 'c'])
test = NameValidator(case_sensitive=False).validate(names)
assert_equal(test, ['A', 'A_1', 'B', 'C'])
test = NameValidator(case_sensitive='upper').validate(names)
assert_equal(test, ['A', 'A_1', 'B', 'C'])
test = NameValidator(case_sensitive='lower').validate(names)
assert_equal(test, ['a', 'a_1', 'b', 'c'])
# check exceptions
assert_raises(ValueError, NameValidator, case_sensitive='foobar')
def test_excludelist(self):
"Test excludelist"
names = ['dates', 'data', 'Other Data', 'mask']
validator = NameValidator(excludelist=['dates', 'data', 'mask'])
test = validator.validate(names)
assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
def test_missing_names(self):
"Test validate missing names"
namelist = ('a', 'b', 'c')
validator = NameValidator()
assert_equal(validator(namelist), ['a', 'b', 'c'])
namelist = ('', 'b', 'c')
assert_equal(validator(namelist), ['f0', 'b', 'c'])
namelist = ('a', 'b', '')
assert_equal(validator(namelist), ['a', 'b', 'f0'])
namelist = ('', 'f0', '')
assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
def test_validate_nb_names(self):
"Test validate nb names"
namelist = ('a', 'b', 'c')
validator = NameValidator()
assert_equal(validator(namelist, nbfields=1), ('a',))
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
['a', 'b', 'c', 'g0', 'g1'])
def test_validate_wo_names(self):
"Test validate no names"
namelist = None
validator = NameValidator()
assert_(validator(namelist) is None)
assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
# -----------------------------------------------------------------------------
def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
class TestStringConverter(object):
"Test StringConverter"
def test_creation(self):
"Test creation of a StringConverter"
converter = StringConverter(int, -99999)
assert_equal(converter._status, 1)
assert_equal(converter.default, -99999)
def test_upgrade(self):
"Tests the upgrade method."
converter = StringConverter()
assert_equal(converter._status, 0)
# test int
assert_equal(converter.upgrade('0'), 0)
assert_equal(converter._status, 1)
# On systems where long defaults to 32-bit, the statuses will be
# offset by one, so we check for this here.
import numpy.core.numeric as nx
status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
# test int > 2**32
assert_equal(converter.upgrade('17179869184'), 17179869184)
assert_equal(converter._status, 1 + status_offset)
# test float
assert_allclose(converter.upgrade('0.'), 0.0)
assert_equal(converter._status, 2 + status_offset)
# test complex
assert_equal(converter.upgrade('0j'), complex('0j'))
assert_equal(converter._status, 3 + status_offset)
# test str
# note that the longdouble type has been skipped, so the
# _status increases by 2. Everything should succeed with
# unicode conversion (5).
for s in ['a', u'a', b'a']:
res = converter.upgrade(s)
assert_(type(res) is unicode)
assert_equal(res, u'a')
assert_equal(converter._status, 5 + status_offset)
def test_missing(self):
"Tests the use of missing values."
converter = StringConverter(missing_values=('missing',
'missed'))
converter.upgrade('0')
assert_equal(converter('0'), 0)
assert_equal(converter(''), converter.default)
assert_equal(converter('missing'), converter.default)
assert_equal(converter('missed'), converter.default)
try:
converter('miss')
except ValueError:
pass
def test_upgrademapper(self):
"Tests updatemapper"
dateparser = _bytes_to_date
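        # Register the date parser on the class-wide mapper, then check that parsed dates and the empty-string fallback both work.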
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
convert = StringConverter(dateparser, date(2000, 1, 1))
test = convert('2001-01-01')
assert_equal(test, date(2001, 1, 1))
test = convert('2009-01-01')
assert_equal(test, date(2009, 1, 1))
test = convert('')
assert_equal(test, date(2000, 1, 1))
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
old_mapper = StringConverter._mapper[:] # copy of list
conv = StringConverter(_bytes_to_date)
assert_equal(conv._mapper, old_mapper)
assert_(hasattr(conv, 'default'))
def test_keep_default(self):
"Make sure we don't lose an explicit default"
converter = StringConverter(None, missing_values='',
default=-999)
converter.upgrade('3.14159265')
assert_equal(converter.default, -999)
assert_equal(converter.type, np.dtype(float))
#
converter = StringConverter(
None, missing_values='', default=0)
converter.upgrade('3.14159265')
assert_equal(converter.default, 0)
assert_equal(converter.type, np.dtype(float))
def test_keep_default_zero(self):
"Check that we don't lose a default of 0"
converter = StringConverter(int, default=0,
missing_values="N/A")
assert_equal(converter.default, 0)
def test_keep_missing_values(self):
"Check that we're not losing missing values"
converter = StringConverter(int, default=0,
missing_values="N/A")
assert_equal(
converter.missing_values, set(['', 'N/A']))
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
converter = StringConverter(np.int64, default=0)
val = "-9223372036854775807"
assert_(converter(val) == -9223372036854775807)
val = "9223372036854775807"
assert_(converter(val) == 9223372036854775807)
def test_uint64_dtype(self):
"Check that uint64 integer types can be specified"
converter = StringConverter(np.uint64, default=0)
val = "9223372043271415339"
assert_(converter(val) == 9223372043271415339)
class TestMiscFunctions(object):
def test_has_nested_dtype(self):
"Test has_nested_dtype"
ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
assert_equal(has_nested_fields(ndtype), True)
def test_easy_dtype(self):
"Test ndtype on dtypes"
# Simple case
ndtype = float
assert_equal(easy_dtype(ndtype), np.dtype(float))
# As string w/o names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', "i4"), ('f1', "f8")]))
# As string w/o names but different default format
assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
np.dtype([('field_000', "i4"), ('field_001', "f8")]))
# As string w/ names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (too many)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (not enough)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names=", b"),
np.dtype([('f0', "i4"), ('b', "f8")]))
# ... (with different default format)
assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
np.dtype([('a', "i4"), ('f00', "f8")]))
# As list of tuples w/o names
ndtype = [('A', int), ('B', float)]
assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
# As list of tuples w/ names
assert_equal(easy_dtype(ndtype, names="a,b"),
np.dtype([('a', int), ('b', float)]))
# As list of tuples w/ not enough names
assert_equal(easy_dtype(ndtype, names="a"),
np.dtype([('a', int), ('f0', float)]))
# As list of tuples w/ too many names
assert_equal(easy_dtype(ndtype, names="a,b,c"),
np.dtype([('a', int), ('b', float)]))
# As list of types w/o names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
# As list of types w names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', int), ('b', float), ('c', float)]))
# As simple dtype w/ names
ndtype = np.dtype(float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
# As simple dtype w/o names (but multiple fields)
ndtype = np.dtype(float)
assert_equal(
easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
def test_flatten_dtype(self):
"Testing flatten_dtype"
# Standard dtype
dt = np.dtype([("a", "f8"), ("b", "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
# Recursive dtype
dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
# dtype with shaped fields
dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, int])
dt_flat = flatten_dtype(dt, True)
assert_equal(dt_flat, [float] * 2 + [int] * 3)
# dtype w/ titles
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
| tynn/numpy | numpy/lib/tests/test__iotools.py | Python | bsd-3-clause | 13,506 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import subprocess
def RunCmdAndCheck(cmd, err_string, output_api, cwd=None):
results = []
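  # Run the command, capture stdout/stderr, and turn a non-zero exit code into a presubmit error.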
p = subprocess.Popen(cmd, cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(p_stdout, p_stderr) = p.communicate()
if p.returncode:
results.append(
output_api.PresubmitError(err_string,
long_text=p_stderr))
return results
def RunUnittests(input_api, output_api):
# Run some Generator unittests if the generator source was changed.
results = []
files = input_api.LocalPaths()
generator_files = []
for filename in files:
name_parts = filename.split(os.sep)
if name_parts[0:2] == ['ppapi', 'generators']:
generator_files.append(filename)
if generator_files != []:
cmd = [ sys.executable, 'idl_gen_pnacl.py', '--wnone', '--test']
ppapi_dir = input_api.PresubmitLocalPath()
results.extend(RunCmdAndCheck(cmd,
'PPAPI IDL Pnacl unittest failed.',
output_api,
os.path.join(ppapi_dir, 'generators')))
return results
# If any .srpc files were changed, run run_srpcgen.py --diff_mode.
def CheckSrpcChange(input_api, output_api):
if [True for filename in input_api.LocalPaths() if
os.path.splitext(filename)[1] == '.srpc']:
return RunCmdAndCheck([sys.executable,
os.path.join(input_api.PresubmitLocalPath(),
'native_client', 'src',
'shared', 'ppapi_proxy',
'run_srpcgen.py'),
'--diff_mode'],
'PPAPI SRPC Diff detected: Run run_srpcgen.py.',
output_api)
return []
# Verify that the files do not contain a 'TODO' in them.
RE_TODO = re.compile(r'\WTODO\W', flags=re.I)
def CheckTODO(input_api, output_api):
files = input_api.LocalPaths()
todo = []
for filename in files:
name, ext = os.path.splitext(filename)
name_parts = name.split(os.sep)
# Only check normal build sources.
if ext not in ['.h', '.cc', '.idl']:
continue
# Only examine the ppapi directory.
if name_parts[0] != 'ppapi':
continue
# Only examine public plugin facing directories.
if name_parts[1] not in ['api', 'c', 'cpp', 'utility']:
continue
# Only examine public stable interfaces.
if name_parts[2] in ['dev', 'private', 'trusted']:
continue
filepath = os.path.join('..', filename)
if RE_TODO.search(open(filepath, 'rb').read()):
todo.append(filename)
if todo:
return [output_api.PresubmitError(
'TODOs found in stable public PPAPI files:',
long_text='\n'.join(todo))]
return []
# Verify that no CPP wrappers use un-versioned PPB interface name macros.
RE_UNVERSIONED_PPB = re.compile(r'\bPPB_\w+_INTERFACE\b')
def CheckUnversionedPPB(input_api, output_api):
files = input_api.LocalPaths()
todo = []
for filename in files:
name, ext = os.path.splitext(filename)
name_parts = name.split(os.sep)
# Only check C++ sources.
if ext not in ['.cc']:
continue
# Only examine the public plugin facing ppapi/cpp directory.
if name_parts[0:2] != ['ppapi', 'cpp']:
continue
# Only examine public stable and trusted interfaces.
if name_parts[2] in ['dev', 'private']:
continue
filepath = os.path.join('..', filename)
if RE_UNVERSIONED_PPB.search(open(filepath, 'rb').read()):
todo.append(filename)
if todo:
return [output_api.PresubmitError(
'Unversioned PPB interface references found in PPAPI C++ wrappers:',
long_text='\n'.join(todo))]
return []
def CheckChange(input_api, output_api):
results = []
results.extend(CheckSrpcChange(input_api, output_api))
results.extend(RunUnittests(input_api, output_api))
results.extend(CheckTODO(input_api, output_api))
results.extend(CheckUnversionedPPB(input_api, output_api))
# Verify all modified *.idl have a matching *.h
files = input_api.LocalPaths()
h_files = []
idl_files = []
# Find all relevant .h and .idl files.
for filename in files:
name, ext = os.path.splitext(filename)
name_parts = name.split(os.sep)
if name_parts[0:2] == ['ppapi', 'c'] and ext == '.h':
h_files.append('/'.join(name_parts[2:]))
if name_parts[0:2] == ['ppapi', 'api'] and ext == '.idl':
idl_files.append('/'.join(name_parts[2:]))
# Generate a list of all appropriate *.h and *.idl changes in this CL.
both = h_files + idl_files
# If there aren't any, we are done checking.
if not both: return results
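  # Flag any changed .idl file that has no matching .h header in this CL.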
missing = []
for filename in idl_files:
if filename not in set(h_files):
missing.append(' ppapi/c/%s.idl' % filename)
if missing:
results.append(
output_api.PresubmitPromptWarning(
'Missing PPAPI header, no change or skipped generation?',
long_text='\n'.join(missing)))
missing_dev = []
missing_stable = []
missing_priv = []
for filename in h_files:
if filename not in set(idl_files):
name_parts = filename.split(os.sep)
if 'trusted' in name_parts:
missing_priv.append(' ppapi/c/%s.h' % filename)
continue
if 'private' in name_parts:
missing_priv.append(' ppapi/c/%s.h' % filename)
continue
if 'dev' in name_parts:
missing_dev.append(' ppapi/c/%s.h' % filename)
continue
missing_stable.append(' ppapi/c/%s.h' % filename)
if missing_priv:
results.append(
output_api.PresubmitPromptWarning(
'Missing PPAPI IDL for private interface, please generate IDL:',
long_text='\n'.join(missing_priv)))
if missing_dev:
results.append(
output_api.PresubmitPromptWarning(
'Missing PPAPI IDL for DEV, required before moving to stable:',
long_text='\n'.join(missing_dev)))
if missing_stable:
results.append(
output_api.PresubmitError(
'Missing PPAPI IDL for stable interface:',
long_text='\n'.join(missing_stable)))
# Verify all *.h files match *.idl definitions, use:
# --test to prevent output to disk
# --diff to generate a unified diff
# --out to pick which files to examine (only the ones in the CL)
ppapi_dir = input_api.PresubmitLocalPath()
cmd = [sys.executable, 'generator.py',
'--wnone', '--diff', '--test','--cgen', '--range=start,end']
# Only generate output for IDL files references (as *.h or *.idl) in this CL
cmd.append('--out=' + ','.join([name + '.idl' for name in both]))
cmd_results = RunCmdAndCheck(cmd,
'PPAPI IDL Diff detected: Run the generator.',
output_api,
os.path.join(ppapi_dir, 'generators'))
if cmd_results:
results.extend(cmd_results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| keishi/chromium | ppapi/PRESUBMIT.py | Python | bsd-3-clause | 7,397 |
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from .conf import settings
def load_media_defaults():
media = []
defaults = {}
for key, backend in settings.PINAX_NOTIFICATIONS_BACKENDS.items():
# key is a tuple (medium_id, backend_label)
media.append(key)
defaults[key[0]] = backend.spam_sensitivity
return media, defaults
def notice_setting_for_user(user, notice_type, medium, scoping=None):
"""
@@@ candidate for overriding via a hookset method so you can customize lookup at site level
"""
kwargs = {
"notice_type": notice_type,
"medium": medium
}
if scoping:
kwargs.update({
"scoping_content_type": ContentType.objects.get_for_model(scoping),
"scoping_object_id": scoping.pk
})
else:
kwargs.update({
"scoping_content_type__isnull": True,
"scoping_object_id__isnull": True
})
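    # Return the stored per-user setting if it exists; otherwise fall back to creating one from the medium's spam-sensitivity default.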
try:
return user.noticesetting_set.get(**kwargs)
except ObjectDoesNotExist:
_, NOTICE_MEDIA_DEFAULTS = load_media_defaults()
if scoping is None:
kwargs.pop("scoping_content_type__isnull")
kwargs.pop("scoping_object_id__isnull")
kwargs.update({
"scoping_content_type": None,
"scoping_object_id": None
})
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
kwargs.update({"send": default})
setting = user.noticesetting_set.create(**kwargs)
return setting
| coldmind/pinax-notifications | pinax/notifications/utils.py | Python | mit | 1,624 |
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from pycsw.core import util
from pycsw.core.etree import etree
from pycsw.ogc.fes import fes1
class Sru(object):
"""SRU wrapper class"""
def __init__(self, context):
self.sru_version = '1.1'
self.namespaces = {
'zd': 'http://www.loc.gov/zing/srw/diagnostic/',
'sru': 'http://www.loc.gov/zing/srw/',
'zr': 'http://explain.z3950.org/dtd/2.1/',
'zs': 'http://www.loc.gov/zing/srw/',
'srw_dc': 'info:srw/schema/1/dc-schema'
}
self.mappings = {
'csw:Record': {
'schema': {
'name': 'dc',
'identifier': 'info:srw/cql-context-set/1/dc-v1.1',
},
'index': {
# map OGC queryables to XPath expressions
'title': '4',
'creator': '1003',
'subject': '29',
'abstract': '62',
'publisher': '1018',
'contributor': 'TBD',
'modified': 'TBD',
'date': '30',
'type': '1031',
'format': '1034',
'identifier': '12',
'source': 'TBD',
'language': 'TBD',
'relation': 'TBD',
'rights': 'TBD',
# bbox and full text map to internal fixed columns
#'ows:BoundingBox': 'bbox',
#'csw:AnyText': 'xml'
}
}
}
self.context = context
self.context.namespaces.update(self.namespaces)
def request_sru2csw(self, kvpin):
"""transform an SRU request into a CSW request"""
kvpout = {'service': 'CSW', 'version': '2.0.2', 'mode': 'sru'}
if 'operation' in kvpin:
if kvpin['operation'] == 'explain':
kvpout['request'] = 'GetCapabilities'
elif kvpin['operation'] == 'searchRetrieve':
kvpout['request'] = 'GetRecords'
if 'startrecord' in kvpin:
kvpout['startposition'] = int(kvpin['startrecord'])
if 'maximumrecords' in kvpin:
kvpout['maxrecords'] = int(kvpin['maximumrecords'])
else:
kvpout['maxrecords'] = 0
# TODO: make smarter typename fetching
kvpout['typenames'] = 'csw:Record'
kvpout['elementsetname'] = 'brief'
kvpout['constraintlanguage'] = 'CQL_TEXT'
kvpout['resulttype'] = 'results'
if 'query' in kvpin:
pname_in_query = False
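                    # If the CQL query already uses a comparison operator it names a queryable; otherwise treat it as a free-text AnyText search.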
for coops in fes1.MODEL['ComparisonOperators'].keys():
if kvpin['query'].find(fes1.MODEL['ComparisonOperators'][coops]['opvalue']) != -1:
pname_in_query = True
break
kvpout['constraint'] = {'type': 'cql'}
if not pname_in_query:
kvpout['constraint'] = 'csw:AnyText like \'%%%s%%\'' % kvpin['query']
else:
kvpout['constraint'] = kvpin['query']
else:
kvpout['request'] = 'GetCapabilities'
return kvpout
def response_csw2sru(self, element, environ):
"""transform a CSW response into an SRU response"""
response_name = etree.QName(element).localname
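        # Branch on the CSW response type: Capabilities maps to an SRU explain response, GetRecordsResponse to searchRetrieve, ExceptionReport to a diagnostic.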
if response_name == 'Capabilities': # explain
node = etree.Element(util.nspath_eval('sru:explainResponse', self.namespaces), nsmap=self.namespaces)
etree.SubElement(node, util.nspath_eval('sru:version', self.namespaces)).text = self.sru_version
record = etree.SubElement(node, util.nspath_eval('sru:record', self.namespaces))
etree.SubElement(record, util.nspath_eval('sru:recordPacking', self.namespaces)).text = 'XML'
etree.SubElement(record, util.nspath_eval('sru:recordSchema', self.namespaces)).text = 'http://explain.z3950.org/dtd/2.1/'
recorddata = etree.SubElement(record, util.nspath_eval('sru:recordData', self.namespaces))
explain = etree.SubElement(recorddata, util.nspath_eval('zr:explain', self.namespaces))
serverinfo = etree.SubElement(explain, util.nspath_eval('zr:serverInfo', self.namespaces), protocol='SRU', version=self.sru_version, transport='http', method='GET POST SOAP')
etree.SubElement(serverinfo, util.nspath_eval('zr:host', self.namespaces)).text = environ.get('HTTP_HOST', environ["SERVER_NAME"]) # WSGI allows for either of these
etree.SubElement(serverinfo, util.nspath_eval('zr:port', self.namespaces)).text = environ['SERVER_PORT']
etree.SubElement(serverinfo, util.nspath_eval('zr:database', self.namespaces)).text = 'pycsw'
databaseinfo = etree.SubElement(explain, util.nspath_eval('zr:databaseInfo', self.namespaces))
etree.SubElement(databaseinfo, util.nspath_eval('zr:title', self.namespaces), lang='en', primary='true').text = element.xpath('//ows:Title|//ows20:Title', namespaces=self.context.namespaces)[0].text
etree.SubElement(databaseinfo, util.nspath_eval('zr:description', self.namespaces), lang='en', primary='true').text = element.xpath('//ows:Abstract|//ows20:Abstract', namespaces=self.context.namespaces)[0].text
indexinfo = etree.SubElement(explain, util.nspath_eval('zr:indexInfo', self.namespaces))
etree.SubElement(indexinfo, util.nspath_eval('zr:set', self.namespaces), name='dc', identifier='info:srw/cql-context-set/1/dc-v1.1')
for key, value in sorted(self.mappings['csw:Record']['index'].items()):
zrindex = etree.SubElement(indexinfo, util.nspath_eval('zr:index', self.namespaces), id=value)
etree.SubElement(zrindex, util.nspath_eval('zr:title', self.namespaces)).text = key
zrmap = etree.SubElement(zrindex, util.nspath_eval('zr:map', self.namespaces))
etree.SubElement(zrmap, util.nspath_eval('zr:map', self.namespaces), set='dc').text = key
zrindex = etree.SubElement(indexinfo, util.nspath_eval('zr:index', self.namespaces))
zrmap = etree.SubElement(zrindex, util.nspath_eval('zr:map', self.namespaces))
etree.SubElement(zrmap, util.nspath_eval('zr:name', self.namespaces), set='dc').text = 'title222'
schemainfo = etree.SubElement(explain, util.nspath_eval('zr:schemaInfo', self.namespaces))
zrschema = etree.SubElement(schemainfo, util.nspath_eval('zr:schema', self.namespaces), name='dc', identifier='info:srw/schema/1/dc-v1.1')
etree.SubElement(zrschema, util.nspath_eval('zr:title', self.namespaces)).text = 'Simple Dublin Core'
configinfo = etree.SubElement(explain, util.nspath_eval('zr:configInfo', self.namespaces))
etree.SubElement(configinfo, util.nspath_eval('zr:default', self.namespaces), type='numberOfRecords').text = '0'
elif response_name == 'GetRecordsResponse':
recpos = int(element.xpath('//@nextRecord')[0]) - int(element.xpath('//@numberOfRecordsReturned')[0])
node = etree.Element(util.nspath_eval('zs:searchRetrieveResponse', self.namespaces), nsmap=self.namespaces)
etree.SubElement(node, util.nspath_eval('zs:version', self.namespaces)).text = self.sru_version
etree.SubElement(node, util.nspath_eval('zs:numberOfRecords', self.namespaces)).text = element.xpath('//@numberOfRecordsMatched')[0]
for rec in element.xpath('//csw:BriefRecord', namespaces=self.context.namespaces):
record = etree.SubElement(node, util.nspath_eval('zs:record', self.namespaces))
etree.SubElement(node, util.nspath_eval('zs:recordSchema', self.namespaces)).text = 'info:srw/schema/1/dc-v1.1'
etree.SubElement(node, util.nspath_eval('zs:recordPacking', self.namespaces)).text = 'xml'
recorddata = etree.SubElement(record, util.nspath_eval('zs:recordData', self.namespaces))
rec.tag = util.nspath_eval('srw_dc:srw_dc', self.namespaces)
recorddata.append(rec)
etree.SubElement(record, util.nspath_eval('zs:recordPosition', self.namespaces)).text = str(recpos)
recpos += 1
elif response_name == 'ExceptionReport':
node = self.exceptionreport2diagnostic(element)
return node
def exceptionreport2diagnostic(self, element):
"""transform a CSW exception into an SRU diagnostic"""
node = etree.Element(
util.nspath_eval('zs:searchRetrieveResponse', self.namespaces), nsmap=self.namespaces)
etree.SubElement(node, util.nspath_eval('zs:version', self.namespaces)).text = self.sru_version
diagnostics = etree.SubElement(node, util.nspath_eval('zs:diagnostics', self.namespaces))
diagnostic = etree.SubElement(
diagnostics, util.nspath_eval('zs:diagnostic', self.namespaces))
etree.SubElement(diagnostic, util.nspath_eval('zd:diagnostic', self.namespaces)).text = \
'info:srw/diagnostic/1/7'
etree.SubElement(diagnostic, util.nspath_eval('zd:message', self.namespaces)).text = \
element.xpath('//ows:Exception/ows:ExceptionText|//ows20:Exception/ows20:ExceptionText', namespaces=self.context.namespaces)[0].text
etree.SubElement(diagnostic, util.nspath_eval('zd:details', self.namespaces)).text = \
element.xpath('//ows:Exception|//ows20:Exception', namespaces=self.context.namespaces)[0].attrib.get('exceptionCode')
return node
| tomkralidis/pycsw | pycsw/sru.py | Python | mit | 11,101 |
from socketserver import StreamRequestHandler, TCPServer
import socket
class EchoHandler(StreamRequestHandler):
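    # StreamRequestHandler tuning: per-connection timeout, buffered reads, unbuffered writes, Nagle left enabled.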
timeout = 5
rbufsize = -1
wbufsize = 0
disable_nagle_algorithm = False
def handle(self):
print('Got connection from', self.client_address)
# self.rfile is a file-like object for reading
try:
for line in self.rfile:
# self.wfile is a file-like object for writing
self.wfile.write(line)
except socket.timeout:
print('Timed out!')
if __name__ == '__main__':
serv = TCPServer(('', 20000), EchoHandler)
print('Echo server running on port 20000')
serv.serve_forever()
| tuanavu/python-cookbook-3rd | src/11/creating_a_tcp_server/echoserv4.py | Python | mit | 696 |
#!/usr/bin/env python2
"""SSH into a running appliance and install VMware VDDK.
"""
import argparse
import sys
from six.moves.urllib.parse import urlparse
from cfme.utils.appliance import IPAppliance, get_or_create_current_appliance
def log(message):
print("[VDDK-INSTALL] {}".format(message))
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--address',
help='hostname or ip address of target appliance', default=None)
parser.add_argument('--vddk_url', help='url to download vddk pkg')
parser.add_argument('--reboot', help='reboot after installation ' +
'(required for proper operation)', action="store_true")
parser.add_argument('--force',
help='force installation if version detected', action="store_true")
args = parser.parse_args()
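    # Use the current appliance from the environment unless an explicit --address was given.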
if not args.address:
appliance = get_or_create_current_appliance()
else:
appliance = IPAppliance(hostname=urlparse(args.address).netloc)
appliance.install_vddk(
reboot=args.reboot, force=args.force, vddk_url=args.vddk_url, log_callback=log)
if __name__ == '__main__':
sys.exit(main())
| mfalesni/cfme_tests | scripts/install_vddk.py | Python | gpl-2.0 | 1,290 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
OpenLayers Plugin
A QGIS plugin
-------------------
begin : 2010-02-03
copyright : (C) 2010 by Pirmin Kalberer, Sourcepole
email : pka at sourcepole.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.core import QgsPluginLayerType
from openlayers_layer import OpenlayersLayer
class OpenlayersPluginLayerType(QgsPluginLayerType):
def __init__(self, iface, add_callback, olLayerTypeRegistry):
QgsPluginLayerType.__init__(self, OpenlayersLayer.LAYER_TYPE)
self.iface = iface
self.add_callback = add_callback
self.olLayerTypeRegistry = olLayerTypeRegistry
def createLayer(self):
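        # Instantiate the custom layer and pass it to the plugin's add callback before returning it to QGIS.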
layer = OpenlayersLayer(self.iface, self.olLayerTypeRegistry)
self.add_callback(layer)
return layer
def showLayerProperties(self, layer):
return False
| nextgis/ngq_compulink | qgis-installer/customization-conf/plugins/openlayers_plugin/openlayers_plugin_layer_type.py | Python | gpl-2.0 | 1,645 |