repo_name | path | copies | size | content | license
---|---|---|---|---|---
PaytonShaw/shadowsocks1 | shadowsocks/lru_cache.py | 30 | 1920 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import logging
import heapq
import time
class LRUCache(collections.MutableMapping):
"""This class is not thread safe"""
def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
self.timeout = timeout
self.close_callback = close_callback
self._store = {}
self._time_to_keys = collections.defaultdict(list)
self._last_visits = []
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
# O(logm)
t = time.time()
self._time_to_keys[t].append(key)
heapq.heappush(self._last_visits, t)
return self._store[key]
def __setitem__(self, key, value):
# O(logm)
t = time.time()
self._store[key] = value
self._time_to_keys[t].append(key)
heapq.heappush(self._last_visits, t)
def __delitem__(self, key):
# O(1)
del self._store[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def sweep(self):
# O(m)
now = time.time()
c = 0
while len(self._last_visits) > 0:
least = self._last_visits[0]
if now - least <= self.timeout:
break
if self.close_callback is not None:
for key in self._time_to_keys[least]:
                    if key in self._store:
value = self._store[key]
self.close_callback(value)
for key in self._time_to_keys[least]:
heapq.heappop(self._last_visits)
                if key in self._store:
del self._store[key]
c += 1
del self._time_to_keys[least]
if c:
logging.debug('%d keys swept' % c) | mit |
lionleaf/dwitter | dwitter/settings/base.py | 1 | 6241 | """
Django settings for dwitter project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_URL = 'https://www.dwitter.net/'
PARENT_HOST = 'dwitter.net'
REGISTRATION_OPEN = True # If True, users can register
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
REGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.
LOGIN_REDIRECT_URL = BASE_URL # The page you want users to arrive at after they successfully log in
LOGIN_URL = BASE_URL + '/accounts/login/' # The page users are directed to if they are not logged in,
# and are trying to access pages requiring authentication
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
ALLOWED_HOSTS = [
'dweet.localhost',
'localhost',
'www.localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_filters',
'rest_framework',
'rest_framework.authtoken',
'dwitter',
'registration',
'dwitter.user',
'dwitter.feed',
'dwitter.dweet',
'anymail',
'compressor',
'dbbackup',
'debug_toolbar',
'corsheaders',
'django_hosts',
]
DBBACKUP_STORAGE = 'django.core.files.storage.FileSystemStorage'
DBBACKUP_STORAGE_OPTIONS = {'location': 'backups'}
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'PAGE_SIZE': 10, # Default to 10
'MAX_PAGE_SIZE': 100, # Maximum limit allowed when using `?page_size=xxx`.
'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',),
'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication','rest_framework.authentication.SessionAuthentication', )
}
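# Note (a hedged sketch, not part of the original settings): DRF reads PAGE_SIZE from
# the dict above, but an upper bound on client-requested page sizes is normally enforced
# on a pagination subclass rather than via a 'MAX_PAGE_SIZE' settings key, e.g.:
#
#     from rest_framework.pagination import PageNumberPagination
#
#     class BoundedPagination(PageNumberPagination):  # illustrative name
#         page_size = 10
#         page_size_query_param = 'page_size'
#         max_page_size = 100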
# List of callables that know how to import templates from various sources.
#TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# # 'django.template.loaders.eggs.Loader',
# )
MIDDLEWARE = [
'django_hosts.middleware.HostsRequestMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_hosts.middleware.HostsResponseMiddleware',
]
ROOT_URLCONF = 'dwitter.urls'
ROOT_HOSTCONF = 'dwitter.hosts'
DEFAULT_HOST = 'www'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': [
'django_hosts.templatetags.hosts_override',
],
},
},
]
WSGI_APPLICATION = 'dwitter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_URLS_REGEX = r'^/api(v2beta)?/.*$'
CORS_ORIGIN_ALLOW_ALL = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
def show_debug_toolbar_when_debug_true_but_not_for_the_dweet_subdomain(request):
if request.host.name == 'dweet':
return False
# Import here so that we get the settings from local.py as well
from django.conf import settings
return settings.DEBUG
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK':
show_debug_toolbar_when_debug_true_but_not_for_the_dweet_subdomain
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'dwitter-default',
}
}
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_AGE = 3600 * 24 * 365 * 100 # Keep user logged in until logout is requested (or 100 years, whichever comes first)
| apache-2.0 |
prmtl/fuel-web | fuel_agent_ci/fuel_agent_ci/tests/base.py | 4 | 3839 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
import json
import os
import sys
import time
try:
from unittest.case import TestCase
except ImportError:
    # Running unit-tests in production environment
from unittest2.case import TestCase
import yaml
from fuel_agent_ci.objects import environment
from fuel_agent_ci import utils
# FIXME(kozhukalov) it is better to set this as command line arg
ENV_FILE = os.path.join(os.path.dirname(__file__),
'../../samples/ci_environment.yaml')
class BaseFuelAgentCITest(TestCase):
FUEL_AGENT_REPO_NAME = 'fuel_agent'
FUEL_AGENT_HTTP_NAME = 'http'
FUEL_AGENT_NET_NAME = 'net'
FUEL_AGENT_DHCP_NAME = 'dhcp'
FUEL_AGENT_SSH_NAME = 'vm'
FUEL_AGENT_TEMPLATE_PATH = '/usr/share/fuel-agent/cloud-init-templates'
def setUp(self):
super(BaseFuelAgentCITest, self).setUp()
# Starting environment
with open(ENV_FILE) as f:
ENV_DATA = (yaml.load(f.read()))
self.env = environment.Environment.new(**ENV_DATA)
self.env.start()
self.repo = self.env.repo_by_name(self.FUEL_AGENT_REPO_NAME)
self.ssh = self.env.ssh_by_name(self.FUEL_AGENT_SSH_NAME)
self.http = self.env.http_by_name(self.FUEL_AGENT_HTTP_NAME)
self.dhcp_hosts = self.env.dhcp_by_name(self.FUEL_AGENT_DHCP_NAME).hosts
self.net = self.env.net_by_name(self.FUEL_AGENT_NET_NAME)
self.ssh.wait()
self._upgrade_fuel_agent()
def _upgrade_fuel_agent(self):
"""This method is to be deprecated when artifact
based build system is ready.
"""
src_dir = os.path.join(self.env.envdir, self.repo.path, 'fuel_agent')
package_name = 'fuel-agent-0.1.0.tar.gz'
# Building fuel-agent pip package
utils.execute('python setup.py sdist', cwd=src_dir)
# Putting fuel-agent pip package on a node
self.ssh.put_file(
os.path.join(src_dir, 'dist', package_name),
os.path.join('/tmp', package_name))
# Installing fuel_agent pip package
self.ssh.run('pip install --upgrade %s' %
os.path.join('/tmp', package_name))
# Copying fuel_agent templates
self.ssh.run('mkdir -p %s' % self.FUEL_AGENT_TEMPLATE_PATH)
for f in os.listdir(
os.path.join(src_dir, 'cloud-init-templates')):
if f.endswith('.jinja2'):
self.ssh.put_file(
os.path.join(src_dir, 'cloud-init-templates', f),
os.path.join(self.FUEL_AGENT_TEMPLATE_PATH, f))
self.ssh.put_file(
os.path.join(src_dir, 'etc/fuel-agent/fuel-agent.conf.sample'),
'/etc/fuel-agent/fuel-agent.conf')
def tearDown(self):
super(BaseFuelAgentCITest, self).tearDown()
self.env.stop()
def render_template(self,
template_name,
template_dir=os.path.join(os.path.dirname(__file__),
'templates'),
template_data=None):
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = env.get_template(template_name)
return template.render(**(template_data or {}))
| apache-2.0 |
rex-xxx/mt6572_x201 | external/clang/utils/analyzer/SumTimerInfo.py | 48 | 3110 | #!/usr/bin/env python
"""
Script to Summarize statistics in the scan-build output.
Statistics are enabled by passing '-internal-stats' option to scan-build
(or '-analyzer-stats' to the analyzer).
"""
import string
from operator import itemgetter
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'scan_build_output_file'
sys.exit(-1)
f = open(sys.argv[1], 'r')
Time = 0.0
TotalTime = 0.0
MaxTime = 0.0
Warnings = 0
Count = 0
FunctionsAnalyzed = 0
ReachableBlocks = 0
ReachedMaxSteps = 0
NumSteps = 0
NumInlinedCallSites = 0
NumBifurcatedCallSites = 0
MaxCFGSize = 0
Mode = 1
for line in f:
if ("Miscellaneous Ungrouped Timers" in line) :
Mode = 1
if (("Analyzer Total Time" in line) and (Mode == 1)) :
s = line.split()
Time = Time + float(s[6])
Count = Count + 1
if (float(s[6]) > MaxTime) :
MaxTime = float(s[6])
if ((("warning generated." in line) or ("warnings generated" in line)) and Mode == 1) :
s = line.split()
Warnings = Warnings + int(s[0])
if (("The # of functions analysed (as top level)" in line) and (Mode == 1)) :
s = line.split()
FunctionsAnalyzed = FunctionsAnalyzed + int(s[0])
if (("The % of reachable basic blocks" in line) and (Mode == 1)) :
s = line.split()
ReachableBlocks = ReachableBlocks + int(s[0])
if (("The # of times we reached the max number of steps" in line) and (Mode == 1)) :
s = line.split()
ReachedMaxSteps = ReachedMaxSteps + int(s[0])
if (("The maximum number of basic blocks in a function" in line) and (Mode == 1)) :
s = line.split()
if (MaxCFGSize < int(s[0])) :
MaxCFGSize = int(s[0])
if (("The # of steps executed" in line) and (Mode == 1)) :
s = line.split()
NumSteps = NumSteps + int(s[0])
if (("The # of times we inlined a call" in line) and (Mode == 1)) :
s = line.split()
NumInlinedCallSites = NumInlinedCallSites + int(s[0])
if (("The # of times we split the path due to imprecise dynamic dispatch info" in line) and (Mode == 1)) :
s = line.split()
NumBifurcatedCallSites = NumBifurcatedCallSites + int(s[0])
if ((") Total" in line) and (Mode == 1)) :
s = line.split()
TotalTime = TotalTime + float(s[6])
print "TU Count %d" % (Count)
print "Time %f" % (Time)
print "Warnings %d" % (Warnings)
print "Functions Analyzed %d" % (FunctionsAnalyzed)
print "Reachable Blocks %d" % (ReachableBlocks)
print "Reached Max Steps %d" % (ReachedMaxSteps)
print "Number of Steps %d" % (NumSteps)
print "Number of Inlined calls %d (bifurcated %d)" % (NumInlinedCallSites, NumBifurcatedCallSites)
print "MaxTime %f" % (MaxTime)
print "TotalTime %f" % (TotalTime)
print "Max CFG Size %d" % (MaxCFGSize)
| gpl-2.0 |
mfherbst/spack | lib/spack/spack/test/cmd/flake8.py | 4 | 3469 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import os
import pytest
import sys
from llnl.util.filesystem import FileFilter
import spack.paths
from spack.cmd.flake8 import flake8, setup_parser, changed_files
from spack.repo import Repo
from spack.util.executable import which
@pytest.fixture(scope='module')
def parser():
"""Returns the parser for the ``flake8`` command"""
parser = argparse.ArgumentParser()
setup_parser(parser)
return parser
@pytest.fixture(scope='module')
def flake8_package():
"""Flake8 only checks files that have been modified.
This fixture makes a small change to the ``flake8``
mock package, yields the filename, then undoes the
change on cleanup.
"""
repo = Repo(spack.paths.mock_packages_path)
filename = repo.filename_for_package_name('flake8')
package = FileFilter(filename)
# Make the change
package.filter("state = 'unmodified'", "state = 'modified'", string=True)
yield filename
# Undo the change
package.filter("state = 'modified'", "state = 'unmodified'", string=True)
def test_changed_files(parser, flake8_package):
args = parser.parse_args()
# changed_files returns file paths relative to the root
# directory of Spack. Convert to absolute file paths.
files = changed_files(args)
files = [os.path.join(spack.paths.prefix, path) for path in files]
# There will likely be other files that have changed
# when these tests are run
assert flake8_package in files
# As of flake8 3.0.0, Python 2.6 and 3.3 are no longer supported
# http://flake8.pycqa.org/en/latest/release-notes/3.0.0.html
@pytest.mark.skipif(
sys.version_info[:2] <= (2, 6) or
(3, 0) <= sys.version_info[:2] <= (3, 3),
reason='flake8 no longer supports Python 2.6 or 3.3 and older')
@pytest.mark.skipif(not which('flake8'), reason='flake8 is not installed.')
def test_flake8(parser, flake8_package):
# Only test the flake8_package that we modified
# Otherwise, the unit tests would fail every time
# the flake8 tests fail
args = parser.parse_args([flake8_package])
flake8(parser, args)
# Get even more coverage
args = parser.parse_args(['--output', '--root-relative', flake8_package])
flake8(parser, args)
| lgpl-2.1 |
Francis-Liu/animated-broccoli | nova/tests/unit/api/ec2/test_middleware.py | 19 | 8419 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import requests
from six.moves import range
import webob
import webob.dec
import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
from nova import test
from nova import wsgi
CONF = cfg.CONF
@webob.dec.wsgify
def conditional_forbid(req):
"""Helper wsgi app returns 403 if param 'die' is 1."""
if 'die' in req.params and req.params['die'] == '1':
raise webob.exc.HTTPForbidden()
return 'OK'
class LockoutTestCase(test.NoDBTestCase):
"""Test case for the Lockout middleware."""
def setUp(self):
super(LockoutTestCase, self).setUp()
timeutils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
def tearDown(self):
timeutils.clear_time_override()
super(LockoutTestCase, self).tearDown()
def _send_bad_attempts(self, access_key, num_attempts=1):
"""Fail x."""
for i in range(num_attempts):
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
self.assertEqual(req.get_response(self.lockout).status_int, 403)
def _is_locked_out(self, access_key):
"""Sends a test request to see if key is locked out."""
req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
self._send_bad_attempts('test1', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_window * 60)
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
class ExecutorTestCase(test.NoDBTestCase):
def setUp(self):
super(ExecutorTestCase, self).setUp()
self.executor = ec2.Executor()
def _execute(self, invoke):
class Fake(object):
pass
fake_ec2_request = Fake()
fake_ec2_request.invoke = invoke
fake_wsgi_request = Fake()
fake_wsgi_request.environ = {
'nova.context': context.get_admin_context(),
'ec2.request': fake_ec2_request,
}
return self.executor(fake_wsgi_request)
def _extract_message(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Message').text
def _extract_code(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Code').text
def test_instance_not_found(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=5)
result = self._execute(not_found)
self.assertIn('i-00000005', self._extract_message(result))
self.assertEqual('InvalidInstanceID.NotFound',
self._extract_code(result))
def test_instance_not_found_none(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=None)
# NOTE(mikal): we want no exception to be raised here, which was what
# was happening in bug/1080406
result = self._execute(not_found)
self.assertIn('None', self._extract_message(result))
self.assertEqual('InvalidInstanceID.NotFound',
self._extract_code(result))
def test_snapshot_not_found(self):
def not_found(context):
raise exception.SnapshotNotFound(snapshot_id=5)
result = self._execute(not_found)
self.assertIn('snap-00000005', self._extract_message(result))
self.assertEqual('InvalidSnapshot.NotFound',
self._extract_code(result))
def test_volume_not_found(self):
def not_found(context):
raise exception.VolumeNotFound(volume_id=5)
result = self._execute(not_found)
self.assertIn('vol-00000005', self._extract_message(result))
self.assertEqual('InvalidVolume.NotFound', self._extract_code(result))
def test_floating_ip_bad_create_request(self):
def bad_request(context):
raise exception.FloatingIpBadRequest()
result = self._execute(bad_request)
self.assertIn('BadRequest', self._extract_message(result))
self.assertEqual('UnsupportedOperation', self._extract_code(result))
class FakeResponse(object):
reason = "Test Reason"
def __init__(self, status_code=400):
self.status_code = status_code
def json(self):
return {}
class KeystoneAuthTestCase(test.NoDBTestCase):
def setUp(self):
super(KeystoneAuthTestCase, self).setUp()
self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
def _validate_ec2_error(self, response, http_status, ec2_code):
self.assertEqual(response.status_code, http_status,
'Expected HTTP status %s' % http_status)
root_e = etree.XML(response.body)
self.assertEqual(root_e.tag, 'Response',
"Top element must be Response.")
errors_e = root_e.find('Errors')
error_e = errors_e[0]
code_e = error_e.find('Code')
self.assertIsNotNone(code_e, "Code element must be present.")
self.assertEqual(code_e.text, ec2_code)
def test_no_signature(self):
req = wsgi.Request.blank('/test')
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_no_key_id(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
@mock.patch.object(requests, 'request', return_value=FakeResponse())
def test_communication_failure(self, mock_request):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
mock_request.assert_called_with('POST', CONF.keystone_ec2_url,
data=mock.ANY, headers=mock.ANY,
verify=mock.ANY, cert=mock.ANY)
@mock.patch.object(requests, 'request', return_value=FakeResponse(200))
def test_no_result_data(self, mock_request):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
mock_request.assert_called_with('POST', CONF.keystone_ec2_url,
data=mock.ANY, headers=mock.ANY,
verify=mock.ANY, cert=mock.ANY)
| apache-2.0 |
styk-tv/offlineimap | offlineimap/ui/Noninteractive.py | 18 | 1248 | # Noninteractive UI
# Copyright (C) 2002-2012 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from offlineimap.ui.UIBase import UIBase
class Basic(UIBase):
"""'Quiet' simply sets log level to INFO"""
def __init__(self, config, loglevel = logging.INFO):
return super(Basic, self).__init__(config, loglevel)
class Quiet(UIBase):
"""'Quiet' simply sets log level to WARNING"""
def __init__(self, config, loglevel = logging.WARNING):
return super(Quiet, self).__init__(config, loglevel)
| gpl-2.0 |
magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/doc/nest_by_example/scripts/brunel2000_interactive.py | 13 | 3535 | # -*- coding: utf-8 -*-
#
# brunel2000_interactive.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.raster_plot
import pylab
# Network parameters. These are given in Brunel (2000) J.Comp.Neuro.
g = 5.0 # Ratio of IPSP to EPSP amplitude: J_I/J_E
eta = 2.0 # rate of external population in multiples of threshold rate
delay = 1.5 # synaptic delay in ms
tau_m = 20.0 # Membrane time constant in ms
V_th = 20.0 # Spike threshold in mV
N_E = 8000
N_I = 2000
N_neurons = N_E + N_I
C_E = int(N_E / 10) # number of excitatory synapses per neuron
C_I = int(N_I / 10) # number of inhibitory synapses per neuron
J_E = 0.1
J_I = -g * J_E
nu_ex = eta * V_th / (J_E * C_E * tau_m) # rate of an external neuron in ms^-1
p_rate = 1000.0 * nu_ex * C_E # rate of the external population in s^-1
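# With the values above: C_E = 800, so nu_ex = 2.0 * 20.0 / (0.1 * 800 * 20.0) = 0.025
# spikes/ms per external input, and p_rate = 1000.0 * 0.025 * 800 = 20000 s^-1.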
# Set parameters of the NEST simulation kernel
nest.SetKernelStatus({'print_time': True,
'local_num_threads': 2})
nest.SetDefaults('iaf_psc_delta',
{'C_m': 1.0,
'tau_m': tau_m,
't_ref': 2.0,
'E_L': 0.0,
'V_th': V_th,
'V_reset': 10.0})
# Create nodes -------------------------------------------------
nodes = nest.Create('iaf_psc_delta', N_neurons)
nodes_E = nodes[:N_E]
nodes_I = nodes[N_E:]
noise=nest.Create('poisson_generator', 1, {'rate': p_rate})
spikes=nest.Create('spike_detector',2,
[{'label': 'brunel_py_ex'},
{'label': 'brunel_py_in'}])
spikes_E=spikes[:1]
spikes_I=spikes[1:]
# Connect nodes ------------------------------------------------
nest.CopyModel('static_synapse_hom_w',
'excitatory',
{'weight':J_E,
'delay':delay})
nest.Connect(nodes_E, nodes,
{'rule': 'fixed_indegree',
'indegree': C_E},
'excitatory')
nest.CopyModel('static_synapse_hom_w',
'inhibitory',
{'weight':J_I,
'delay':delay})
nest.Connect(nodes_I, nodes,
{'rule': 'fixed_indegree',
'indegree': C_I},
'inhibitory')
nest.Connect(noise, nodes, syn_spec='excitatory')
N_rec = 50 # Number of neurons to record from
nest.Connect(nodes_E[:N_rec], spikes_E)
nest.Connect(nodes_I[:N_rec], spikes_I)
# Simulate -----------------------------------------------------
simtime = 300.
nest.Simulate(simtime)
ex_events, in_events = nest.GetStatus(spikes, 'n_events')
events_to_rate = 1000. / simtime /N_rec
rate_ex = ex_events * events_to_rate
print('Excitatory rate: {:.2f} Hz'.format(rate_ex))
rate_in = in_events * events_to_rate
print('Inhibitory rate: {:.2f} Hz'.format(rate_in))
nest.raster_plot.from_device(spikes_E, hist=True)
#pylab.show()
pylab.savefig('../figures/brunel_interactive.eps')
| gpl-2.0 |
hilaskis/UAV_MissionPlanner | Lib/pipes.py | 82 | 9647 | """Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
For an example, see the function test() at the end of the file.
""" # '
import re
import os
import tempfile
import string
__all__ = ["Template"]
# Conversion step kinds
FILEIN_FILEOUT = 'ff' # Must read & write real files
STDIN_FILEOUT = '-f' # Must write a real file
FILEIN_STDOUT = 'f-' # Must read a real file
STDIN_STDOUT = '--' # Normal pipeline element
SOURCE = '.-' # Must be first, writes stdout
SINK = '-.' # Must be last, reads stdin
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
SOURCE, SINK]
class Template:
"""Class representing a pipeline template."""
def __init__(self):
"""Template() returns a fresh pipeline template."""
self.debugging = 0
self.reset()
def __repr__(self):
"""t.__repr__() implements repr(t)."""
return '<Template instance, steps=%r>' % (self.steps,)
def reset(self):
"""t.reset() restores a pipeline template to its initial state."""
self.steps = []
def clone(self):
"""t.clone() returns a new pipeline template with identical
initial state as the current one."""
t = Template()
t.steps = self.steps[:]
t.debugging = self.debugging
return t
def debug(self, flag):
"""t.debug(flag) turns debugging on or off."""
self.debugging = flag
def append(self, cmd, kind):
"""t.append(cmd, kind) adds a new step at the end."""
if type(cmd) is not type(''):
raise TypeError, \
'Template.append: cmd must be a string'
if kind not in stepkinds:
raise ValueError, \
'Template.append: bad kind %r' % (kind,)
if kind == SOURCE:
raise ValueError, \
'Template.append: SOURCE can only be prepended'
if self.steps and self.steps[-1][1] == SINK:
raise ValueError, \
'Template.append: already ends with SINK'
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
raise ValueError, \
'Template.append: missing $IN in cmd'
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
raise ValueError, \
'Template.append: missing $OUT in cmd'
self.steps.append((cmd, kind))
def prepend(self, cmd, kind):
"""t.prepend(cmd, kind) adds a new step at the front."""
if type(cmd) is not type(''):
raise TypeError, \
'Template.prepend: cmd must be a string'
if kind not in stepkinds:
raise ValueError, \
'Template.prepend: bad kind %r' % (kind,)
if kind == SINK:
raise ValueError, \
'Template.prepend: SINK can only be appended'
if self.steps and self.steps[0][1] == SOURCE:
raise ValueError, \
'Template.prepend: already begins with SOURCE'
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
raise ValueError, \
'Template.prepend: missing $IN in cmd'
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
raise ValueError, \
'Template.prepend: missing $OUT in cmd'
self.steps.insert(0, (cmd, kind))
def open(self, file, rw):
"""t.open(file, rw) returns a pipe or file object open for
reading or writing; the file is the other end of the pipeline."""
if rw == 'r':
return self.open_r(file)
if rw == 'w':
return self.open_w(file)
raise ValueError, \
'Template.open: rw must be \'r\' or \'w\', not %r' % (rw,)
def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if not self.steps:
return open(file, 'r')
if self.steps[-1][1] == SINK:
raise ValueError, \
                  'Template.open_r: pipeline ends with SINK'
cmd = self.makepipeline(file, '')
return os.popen(cmd, 'r')
def open_w(self, file):
if not self.steps:
return open(file, 'w')
if self.steps[0][1] == SOURCE:
raise ValueError, \
'Template.open_w: pipeline begins with SOURCE'
cmd = self.makepipeline('', file)
return os.popen(cmd, 'w')
def copy(self, infile, outfile):
return os.system(self.makepipeline(infile, outfile))
def makepipeline(self, infile, outfile):
cmd = makepipeline(infile, self.steps, outfile)
if self.debugging:
print cmd
cmd = 'set -x; ' + cmd
return cmd
def makepipeline(infile, steps, outfile):
# Build a list with for each command:
# [input filename or '', command string, kind, output filename or '']
list = []
for cmd, kind in steps:
list.append(['', cmd, kind, ''])
#
# Make sure there is at least one step
#
if not list:
list.append(['', 'cat', '--', ''])
#
# Take care of the input and output ends
#
[cmd, kind] = list[0][1:3]
if kind[0] == 'f' and not infile:
list.insert(0, ['', 'cat', '--', ''])
list[0][0] = infile
#
[cmd, kind] = list[-1][1:3]
if kind[1] == 'f' and not outfile:
list.append(['', 'cat', '--', ''])
list[-1][-1] = outfile
#
# Invent temporary files to connect stages that need files
#
garbage = []
for i in range(1, len(list)):
lkind = list[i-1][2]
rkind = list[i][2]
if lkind[1] == 'f' or rkind[0] == 'f':
(fd, temp) = tempfile.mkstemp()
os.close(fd)
garbage.append(temp)
list[i-1][-1] = list[i][0] = temp
#
for item in list:
[inf, cmd, kind, outf] = item
if kind[1] == 'f':
cmd = 'OUT=' + quote(outf) + '; ' + cmd
if kind[0] == 'f':
cmd = 'IN=' + quote(inf) + '; ' + cmd
if kind[0] == '-' and inf:
cmd = cmd + ' <' + quote(inf)
if kind[1] == '-' and outf:
cmd = cmd + ' >' + quote(outf)
item[1] = cmd
#
cmdlist = list[0][1]
for item in list[1:]:
[cmd, kind] = item[1:3]
if item[0] == '':
if 'f' in kind:
cmd = '{ ' + cmd + '; }'
cmdlist = cmdlist + ' |\n' + cmd
else:
cmdlist = cmdlist + '\n' + cmd
#
if garbage:
rmcmd = 'rm -f'
for file in garbage:
rmcmd = rmcmd + ' ' + quote(file)
trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
#
return cmdlist
# Reliably quote a string as a single argument for /bin/sh
# Safe unquoted
_safechars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
def quote(file):
"""Return a shell-escaped version of the file string."""
for c in file:
if c not in _safechars:
break
else:
if not file:
return "''"
return file
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + file.replace("'", "'\"'\"'") + "'"
| gpl-2.0 |
unicefuganda/uSurvey | survey/tests/views/test_question_template.py | 1 | 3669 | from model_mommy import mommy
from django.test.client import Client
from django.contrib.auth.models import User
from survey.models.batch import Batch
from django.core.urlresolvers import reverse
from survey.models import (QuestionModule, Interviewer, EnumerationArea, QuestionTemplate, NumericalAnswer,
TextAnswer, MultiChoiceAnswer, DateAnswer, QuestionOption, Interview, ListingTemplate,
ODKAccess, Question, QuestionSet,Batch, ResponseValidation, Survey)
from survey.models import QuestionTemplate, Survey, QuestionModule, Batch, ResponseValidation
from survey.tests.base_test import BaseTest
class QuestionsTemplateViewsTest(BaseTest):
def setUp(self):
self.client = Client()
user_without_permission = User.objects.create_user(username='useless', email='[email protected]',
password='I_Suck')
raj = self.assign_permission_to(User.objects.create_user('demo9', '[email protected]', 'demo9'),
'can_view_batches')
self.client.login(username='demo9', password='demo9')
self.rsp = ResponseValidation.objects.create(validation_test="validation",constraint_message="msg")
self.module = QuestionModule.objects.create(name="Education",description="bla blaaa")
self.question_1 = QuestionTemplate.objects.create(module_id=self.module.id,identifier='1.1',text='ttt',answer_type='Numerical Answer',response_validation_id=1)
def test_index(self):
response = self.client.get(reverse('show_question_library'))
self.failUnlessEqual(response.status_code, 200)
def test_export(self):
response = self.client.get(reverse('export_question_library'))
self.failUnlessEqual(response.status_code, 200)
def test_add(self):
url = reverse('new_question_library')
response = self.client.get(url)
self.assertIn('questionform', response.context)
data = {'text': 'lib test text', 'identifier': 'test_identifier',
'module': self.module.id, 'answer_type': 'Numerical Answer'}
response = self.client.post(url, data=data)
self.failUnlessEqual(response.status_code, 302)
template = QuestionTemplate.objects.filter(text=data['text']).first()
self.assertTrue(QuestionTemplate.objects.filter(text=data['text']).exists())
created_question = QuestionTemplate.objects.filter(text=data['text']).first()
url = reverse('edit_%s' % QuestionTemplate.resolve_tag(), args=(template.id, ))
data['text'] = 'edited entry'
response = self.client.post(url, data=data)
self.assertTrue(QuestionTemplate.objects.filter(text=data['text']).count(), 1)
def test_delete_template_question(self):
question = mommy.make(QuestionTemplate)
url = reverse('delete_question_template_page', args=(question.id, ))
response = self.client.get(url)
self.assertFalse(QuestionTemplate.objects.filter(id=question.id).exists())
def test_filter(self):
response = self.client.get(reverse('filter_question_list'))
self.failUnlessEqual(response.status_code, 200)
# def test_qt_does_not_exist(self):
# message = "Question Template does not exist."
# self.assert_object_does_not_exist(reverse('edit_question_library',kwargs={"question_id":500}), message)
# def test_should_throw_error_if_deleting_non_existing_qt(self):
# message = "Question Template does not exist."
# self.assert_object_does_not_exist(reverse('delete_question_library',kwargs={"question_id":500}), message) | bsd-3-clause |
gaddman/ansible | test/units/modules/network/cloudvision/test_cv_server_provision.py | 15 | 46159 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat import unittest
from units.compat.mock import patch, Mock
import sys
sys.modules['cvprac'] = Mock()
sys.modules['cvprac.cvp_client'] = Mock()
sys.modules['cvprac.cvp_client_errors'] = Mock()
from ansible.modules.network.cloudvision import cv_server_provision
class MockException(BaseException):
pass
class TestCvServerProvision(unittest.TestCase):
@patch('ansible.modules.network.cloudvision.cv_server_provision.CvpApiError',
new_callable=lambda: MockException)
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_module_args(self, mock_module, mock_connect, mock_info,
mock_comp, mock_server_conf, mock_exception):
''' Test main module args.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='show', switch_name='eos')
mock_module_object.fail_json.side_effect = SystemExit('Exiting')
mock_module.return_value = mock_module_object
mock_connect.return_value = 'Client'
mock_info.side_effect = mock_exception('Error Getting Info')
argument_spec = dict(
host=dict(required=True),
port=dict(required=False, default=None),
protocol=dict(default='https', choices=['http', 'https']),
username=dict(required=True),
password=dict(required=True, no_log=True),
server_name=dict(required=True),
switch_name=dict(required=True),
switch_port=dict(required=True),
port_vlan=dict(required=False, default=None),
template=dict(require=True),
action=dict(default='show', choices=['show', 'add', 'remove']),
auto_run=dict(type='bool', default=False),
)
self.assertRaises(SystemExit, cv_server_provision.main)
mock_module.assert_called_with(argument_spec=argument_spec,
supports_check_mode=False)
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
mock_comp.assert_not_called()
mock_server_conf.assert_not_called()
mock_module_object.fail_json.assert_called_with(msg='Error Getting Info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.CvpApiError',
new_callable=lambda: MockException)
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_no_switch_configlet(self, mock_module, mock_connect,
mock_info, mock_comp, mock_server_conf,
mock_exception):
''' Test main fails if switch has no configlet for Ansible to edit.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='add', switch_name='eos')
mock_module_object.fail_json.side_effect = SystemExit('Exiting')
mock_module.return_value = mock_module_object
mock_connect.return_value = 'Client'
mock_info.return_value = 'Info'
mock_server_conf.return_value = None
self.assertRaises(SystemExit, cv_server_provision.main)
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
self.assertEqual(mock_comp.call_count, 1)
self.assertEqual(mock_server_conf.call_count, 1)
mock_module_object.fail_json.assert_called_with(
msg='Switch eos has no configurable server ports.')
@patch('ansible.modules.network.cloudvision.cv_server_provision.CvpApiError',
new_callable=lambda: MockException)
@patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_port_not_in_config(self, mock_module, mock_connect, mock_info,
mock_comp, mock_server_conf,
mock_port_conf, mock_exception):
''' Test main fails if user specified port not in configlet.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='add', switch_name='eos',
switch_port='3')
mock_module_object.fail_json.side_effect = SystemExit('Exiting')
mock_module.return_value = mock_module_object
mock_connect.return_value = 'Client'
mock_info.return_value = 'Info'
mock_server_conf.return_value = 'Configlet'
mock_port_conf.return_value = None
self.assertRaises(SystemExit, cv_server_provision.main)
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
self.assertEqual(mock_comp.call_count, 1)
self.assertEqual(mock_server_conf.call_count, 1)
self.assertEqual(mock_port_conf.call_count, 1)
mock_module_object.fail_json.assert_called_with(
msg='Port 3 is not configurable as a server port on switch eos.')
@patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
@patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_show(self, mock_module, mock_connect, mock_info, mock_comp,
mock_server_conf, mock_port_conf, mock_conf_action):
''' Test main good with show action.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='show', switch_name='eos',
switch_port='3', auto_run=False)
mock_module.return_value = mock_module_object
mock_connect.return_value = 'Client'
mock_info.return_value = 'Info'
mock_server_conf.return_value = 'Configlet'
mock_port_conf.return_value = 'Port'
mock_conf_action.return_value = dict()
cv_server_provision.main()
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
mock_comp.assert_not_called()
self.assertEqual(mock_server_conf.call_count, 1)
self.assertEqual(mock_port_conf.call_count, 1)
self.assertEqual(mock_conf_action.call_count, 1)
mock_module_object.fail_json.assert_not_called()
return_dict = dict(changed=False, switchInfo='Info',
switchConfigurable=True, portConfigurable=True,
taskCreated=False, taskExecuted=False,
taskCompleted=False)
mock_module_object.exit_json.assert_called_with(**return_dict)
@patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
@patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_add_no_auto_run(self, mock_module, mock_connect, mock_info,
mock_comp, mock_server_conf, mock_port_conf,
mock_conf_action):
''' Test main good with add action and no auto_run.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='add', switch_name='eos',
switch_port='3', auto_run=False)
mock_module.return_value = mock_module_object
mock_connect.return_value = 'Client'
mock_info.return_value = 'Info'
mock_server_conf.return_value = 'Configlet'
mock_port_conf.return_value = 'Port'
mock_conf_action.return_value = dict(taskCreated=True)
cv_server_provision.main()
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
self.assertEqual(mock_comp.call_count, 1)
self.assertEqual(mock_server_conf.call_count, 1)
self.assertEqual(mock_port_conf.call_count, 1)
self.assertEqual(mock_conf_action.call_count, 1)
mock_module_object.fail_json.assert_not_called()
return_dict = dict(changed=False, switchInfo='Info',
switchConfigurable=True, portConfigurable=True,
taskCreated=True, taskExecuted=False,
taskCompleted=False)
mock_module_object.exit_json.assert_called_with(**return_dict)
@patch('ansible.modules.network.cloudvision.cv_server_provision.wait_for_task_completion')
@patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_update_task')
@patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
@patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_add_auto_run(self, mock_module, mock_connect, mock_info,
mock_comp, mock_server_conf, mock_port_conf,
mock_conf_action, mock_conf_task, mock_wait):
''' Test main good with add and auto_run. Config updated, task created.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='add', switch_name='eos',
switch_port='3', auto_run=True)
mock_module.return_value = mock_module_object
mock_client_object = Mock()
mock_connect.return_value = mock_client_object
mock_info.return_value = 'Info'
mock_server_conf.return_value = 'Configlet'
mock_port_conf.return_value = 'Port'
mock_conf_action.return_value = dict(taskCreated=True, changed=True)
mock_conf_task.return_value = '7'
mock_wait.return_value = True
cv_server_provision.main()
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
self.assertEqual(mock_comp.call_count, 1)
self.assertEqual(mock_server_conf.call_count, 1)
self.assertEqual(mock_port_conf.call_count, 1)
self.assertEqual(mock_conf_action.call_count, 1)
self.assertEqual(mock_conf_task.call_count, 1)
self.assertEqual(mock_wait.call_count, 1)
mock_module_object.fail_json.assert_not_called()
return_dict = dict(changed=True, switchInfo='Info', taskId='7',
switchConfigurable=True, portConfigurable=True,
taskCreated=True, taskExecuted=True,
taskCompleted=True)
mock_module_object.exit_json.assert_called_with(**return_dict)
@patch('ansible.modules.network.cloudvision.cv_server_provision.wait_for_task_completion')
@patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_update_task')
@patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
@patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
@patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
@patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
@patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
def test_main_add_auto_run_no_task(self, mock_module, mock_connect,
mock_info, mock_comp, mock_server_conf,
mock_port_conf, mock_conf_action, mock_conf_task,
mock_wait):
''' Test main good with add and auto_run. Config not updated, no task.
'''
mock_module_object = Mock()
mock_module_object.params = dict(action='add', switch_name='eos',
switch_port='3', auto_run=True)
mock_module.return_value = mock_module_object
mock_client_object = Mock()
mock_connect.return_value = mock_client_object
mock_info.return_value = 'Info'
mock_server_conf.return_value = 'Configlet'
mock_port_conf.return_value = 'Port'
mock_conf_action.return_value = dict(taskCreated=True, changed=False)
mock_conf_task.return_value = None
cv_server_provision.main()
self.assertEqual(mock_connect.call_count, 1)
self.assertEqual(mock_info.call_count, 1)
self.assertEqual(mock_comp.call_count, 1)
self.assertEqual(mock_server_conf.call_count, 1)
self.assertEqual(mock_port_conf.call_count, 1)
self.assertEqual(mock_conf_action.call_count, 1)
self.assertEqual(mock_conf_task.call_count, 1)
mock_wait.assert_not_called()
mock_module_object.fail_json.assert_not_called()
return_dict = dict(changed=False, switchInfo='Info',
switchConfigurable=True, portConfigurable=True,
taskCreated=False, taskExecuted=False,
taskCompleted=False)
mock_module_object.exit_json.assert_called_with(**return_dict)
@patch('ansible.modules.network.cloudvision.cv_server_provision.CvpClient')
def test_connect_good(self, mock_client):
''' Test connect success.
'''
module = Mock()
module.params = dict(host='host', username='username',
password='password', protocol='https', port='10')
connect_mock = Mock()
mock_client.return_value = connect_mock
client = cv_server_provision.connect(module)
self.assertIsInstance(client, Mock)
self.assertEqual(mock_client.call_count, 1)
connect_mock.connect.assert_called_once_with(['host'], 'username',
'password', port='10',
protocol='https')
module.fail_json.assert_not_called()
@patch('ansible.modules.network.cloudvision.cv_server_provision.CvpLoginError',
new_callable=lambda: MockException)
@patch('ansible.modules.network.cloudvision.cv_server_provision.CvpClient')
def test_connect_fail(self, mock_client, mock_exception):
''' Test connect failure with login error.
'''
module = Mock()
module.params = dict(host='host', username='username',
password='password', protocol='https', port='10')
module.fail_json.side_effect = SystemExit
connect_mock = Mock()
connect_mock.connect.side_effect = mock_exception('Login Error')
mock_client.return_value = connect_mock
self.assertRaises(SystemExit, cv_server_provision.connect, module)
self.assertEqual(connect_mock.connect.call_count, 1)
module.fail_json.assert_called_once_with(msg='Login Error')
def test_switch_info_good(self):
''' Test switch_info success.
'''
module = Mock()
module.params = dict(switch_name='eos')
module.client.api.get_device_by_name.return_value = dict(fqdn='eos')
info = cv_server_provision.switch_info(module)
self.assertEqual(module.client.api.get_device_by_name.call_count, 1)
self.assertEqual(info['fqdn'], 'eos')
module.fail_json.assert_not_called()
def test_switch_info_no_switch(self):
''' Test switch_info fails.
'''
module = Mock()
module.params = dict(switch_name='eos')
module.client.api.get_device_by_name.return_value = None
info = cv_server_provision.switch_info(module)
self.assertEqual(module.client.api.get_device_by_name.call_count, 1)
self.assertEqual(info, None)
module.fail_json.assert_called_once_with(
msg="Device with name 'eos' does not exist.")
def test_switch_in_compliance_good(self):
''' Test switch_in_compliance good.
'''
module = Mock()
module.client.api.check_compliance.return_value = dict(
complianceCode='0000')
sw_info = dict(key='key', type='type', fqdn='eos')
cv_server_provision.switch_in_compliance(module, sw_info)
self.assertEqual(module.client.api.check_compliance.call_count, 1)
module.fail_json.assert_not_called()
def test_switch_in_compliance_fail(self):
''' Test switch_in_compliance fail.
'''
module = Mock()
module.client.api.check_compliance.return_value = dict(
complianceCode='0001')
sw_info = dict(key='key', type='type', fqdn='eos')
cv_server_provision.switch_in_compliance(module, sw_info)
self.assertEqual(module.client.api.check_compliance.call_count, 1)
module.fail_json.assert_called_with(
msg='Switch eos is not in compliance.'
' Returned compliance code 0001.')
def test_server_configurable_configlet_good(self):
''' Test server_configurable_configlet good.
'''
module = Mock()
module.params = dict(switch_name='eos')
configlets = [dict(name='configlet1', info='line'),
dict(name='eos-server', info='info')]
module.client.api.get_configlets_by_device_id.return_value = configlets
sw_info = dict(key='key', type='type', fqdn='eos')
result = cv_server_provision.server_configurable_configlet(module,
sw_info)
self.assertEqual(module.client.api.get_configlets_by_device_id.call_count, 1)
self.assertIsNotNone(result)
self.assertEqual(result['name'], 'eos-server')
self.assertEqual(result['info'], 'info')
def test_server_configurable_configlet_not_configurable(self):
''' Test server_configurable_configlet fail. No server configlet.
'''
module = Mock()
module.params = dict(switch_name='eos')
configlets = [dict(name='configlet1', info='line'),
dict(name='configlet2', info='info')]
module.client.api.get_configlets_by_device_id.return_value = configlets
sw_info = dict(key='key', type='type', fqdn='eos')
result = cv_server_provision.server_configurable_configlet(module, sw_info)
self.assertEqual(module.client.api.get_configlets_by_device_id.call_count, 1)
self.assertIsNone(result)
def test_server_configurable_configlet_no_configlets(self):
''' Test server_configurable_configlet fail. No switch configlets.
'''
module = Mock()
module.params = dict(switch_name='eos')
module.client.api.get_configlets_by_device_id.return_value = []
sw_info = dict(key='key', type='type', fqdn='eos')
result = cv_server_provision.server_configurable_configlet(module,
sw_info)
self.assertEqual(module.client.api.get_configlets_by_device_id.call_count, 1)
self.assertIsNone(result)
def test_port_configurable_good(self):
''' Test port_configurable user provided switch port in configlet.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', config=config)
result = cv_server_provision.port_configurable(module, configlet)
self.assertTrue(result)
def test_port_configurable_fail(self):
''' Test port_configurable user provided switch port not in configlet.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='2')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', config=config)
result = cv_server_provision.port_configurable(module, configlet)
self.assertFalse(result)
def test_port_configurable_fail_no_config(self):
''' Test port_configurable configlet empty.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='2')
config = ''
configlet = dict(name='eos-server', config=config)
result = cv_server_provision.port_configurable(module, configlet)
self.assertFalse(result)
def test_configlet_action_show_blank_config(self):
''' Test configlet_action show returns current port configuration.
'''
module = Mock()
module.params = dict(action='show', switch_name='eos', switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', key='key', config=config)
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['currentConfigBlock'], 'interface Ethernet3\n!')
module.client.api.update_configlet.assert_not_called()
@patch('ansible.modules.network.cloudvision.cv_server_provision.config_from_template')
def test_configlet_action_add_with_task(self, mock_template):
        ''' Test configlet_action add with change updates the configlet and adds
        proper info to return data. Includes spawned task info.
'''
module = Mock()
module.params = dict(action='add', switch_name='eos', switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', key='key', config=config)
template_config = ('interface Ethernet3\n description Host eos'
' managed by Ansible and Jinja template\n'
' load-interval 30\n'
' switchport\n'
' switchport mode trunk\n'
' no shutdown\n!')
mock_template.return_value = template_config
update_return = dict(data='Configlet eos-server successfully updated'
' and task initiated.')
module.client.api.update_configlet.return_value = update_return
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['oldConfigBlock'], 'interface Ethernet3\n!')
full_config = '!\n' + template_config + '\ninterface Ethernet4\n!'
self.assertEqual(result['fullConfig'], full_config)
self.assertEqual(result['updateConfigletResponse'],
update_return['data'])
self.assertTrue(result['changed'])
self.assertTrue(result['taskCreated'])
self.assertEqual(module.client.api.update_configlet.call_count, 1)
@patch('ansible.modules.network.cloudvision.cv_server_provision.config_from_template')
def test_configlet_action_add_no_task(self, mock_template):
        ''' Test configlet_action add that doesn't change the configlet and adds
        proper info to return data. Does not include any task info.
'''
module = Mock()
module.params = dict(action='add', switch_name='eos', switch_port='3')
config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
configlet = dict(name='eos-server', key='key', config=config)
template_config = 'interface Ethernet3\n description test\n!'
mock_template.return_value = template_config
update_return = dict(data='Configlet eos-server successfully updated.')
module.client.api.update_configlet.return_value = update_return
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['oldConfigBlock'],
'interface Ethernet3\n description test\n!')
self.assertEqual(result['fullConfig'], config)
self.assertEqual(result['updateConfigletResponse'],
update_return['data'])
self.assertNotIn('changed', result)
self.assertNotIn('taskCreated', result)
self.assertEqual(module.client.api.update_configlet.call_count, 1)
def test_configlet_action_remove_with_task(self):
        ''' Test configlet_action remove with change updates the configlet and adds
        proper info to return data. Includes spawned task info.
'''
module = Mock()
module.params = dict(action='remove', switch_name='eos',
switch_port='3')
config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
configlet = dict(name='eos-server', key='key', config=config)
update_return = dict(data='Configlet eos-server successfully updated'
' and task initiated.')
module.client.api.update_configlet.return_value = update_return
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['oldConfigBlock'],
'interface Ethernet3\n description test\n!')
full_config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
self.assertEqual(result['fullConfig'], full_config)
self.assertEqual(result['updateConfigletResponse'],
update_return['data'])
self.assertTrue(result['changed'])
self.assertTrue(result['taskCreated'])
self.assertEqual(module.client.api.update_configlet.call_count, 1)
def test_configlet_action_remove_no_task(self):
        ''' Test configlet_action with remove that doesn't change the configlet and
        adds proper info to return data. Does not include any task info.
'''
module = Mock()
module.params = dict(action='remove', switch_name='eos',
switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', key='key', config=config)
update_return = dict(data='Configlet eos-server successfully updated.')
module.client.api.update_configlet.return_value = update_return
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['oldConfigBlock'], 'interface Ethernet3\n!')
self.assertEqual(result['fullConfig'], config)
self.assertEqual(result['updateConfigletResponse'],
update_return['data'])
self.assertNotIn('changed', result)
self.assertNotIn('taskCreated', result)
self.assertEqual(module.client.api.update_configlet.call_count, 1)
def test_current_config_empty_config(self):
''' Test current_config with empty config for port
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='4')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4'
result = cv_server_provision.current_config(module, config)
self.assertIsNotNone(result)
self.assertEqual(result, 'interface Ethernet4')
def test_current_config_with_config(self):
''' Test current_config with config for port
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
result = cv_server_provision.current_config(module, config)
self.assertIsNotNone(result)
self.assertEqual(result, 'interface Ethernet3\n description test\n!')
def test_current_config_no_match(self):
''' Test current_config with no entry for port
'''
module = Mock()
module.fail_json.side_effect = SystemExit
module.params = dict(switch_name='eos', switch_port='2')
config = '!\ninterface Ethernet3\n description test\n!'
self.assertRaises(SystemExit, cv_server_provision.current_config,
module, config)
def test_valid_template_true(self):
''' Test valid_template true
'''
template = 'interface Ethernet3\n description test\n!'
result = cv_server_provision.valid_template('3', template)
self.assertTrue(result)
def test_valid_template_false(self):
''' Test valid_template false
'''
template = 'interface Ethernet3\n description test\n!'
result = cv_server_provision.valid_template('4', template)
self.assertFalse(result)
@patch('jinja2.DebugUndefined')
@patch('jinja2.Environment')
@patch('jinja2.FileSystemLoader')
def test_config_from_template_no_template(self, mock_file_sys, mock_env,
mock_debug):
''' Test config_from_template good. No template.
'''
module = Mock()
module.fail_json.side_effect = SystemExit
module.params = dict(switch_name='eos', switch_port='3',
server_name='new', template='jinja.j2')
mock_file_sys.return_value = 'file'
mock_debug.return_value = 'debug'
env_mock = Mock()
env_mock.get_template.return_value = None
mock_env.return_value = env_mock
self.assertRaises(SystemExit, cv_server_provision.config_from_template,
module)
self.assertEqual(mock_file_sys.call_count, 1)
self.assertEqual(mock_env.call_count, 1)
self.assertEqual(module.fail_json.call_count, 1)
@patch('jinja2.meta.find_undeclared_variables')
@patch('jinja2.DebugUndefined')
@patch('jinja2.Environment')
@patch('jinja2.FileSystemLoader')
def test_config_from_template_good_no_vlan(self, mock_file_sys, mock_env, mock_debug,
mock_find):
''' Test config_from_template good. No port_vlan.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3',
server_name='new', template='jinja.j2')
mock_file_sys.return_value = 'file'
mock_debug.return_value = 'debug'
template_mock = Mock()
template_mock.render.return_value = ('interface Ethernet3\n'
' description test\n'
' switchport\n'
' switchport mode trunk\n'
' no shutdown\n!')
env_mock = Mock()
env_mock.loader.get_source.return_value = ['one', 'two']
env_mock.parse.return_value = 'parsed'
env_mock.get_template.return_value = template_mock
mock_env.return_value = env_mock
mock_find.return_value = dict(server_name=None, switch_port=None)
result = cv_server_provision.config_from_template(module)
self.assertIsNotNone(result)
expected = ('interface Ethernet3\n'
' description test\n'
' switchport\n'
' switchport mode trunk\n'
' no shutdown\n!')
self.assertEqual(result, expected)
self.assertEqual(mock_file_sys.call_count, 1)
self.assertEqual(mock_env.call_count, 1)
module.fail_json.assert_not_called()
@patch('jinja2.meta.find_undeclared_variables')
@patch('jinja2.DebugUndefined')
@patch('jinja2.Environment')
@patch('jinja2.FileSystemLoader')
def test_config_from_template_good_vlan(self, mock_file_sys, mock_env, mock_debug,
mock_find):
''' Test config_from_template good. With port_vlan.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3',
server_name='new', template='jinja.j2', port_vlan='7')
mock_file_sys.return_value = 'file'
mock_debug.return_value = 'debug'
template_mock = Mock()
template_mock.render.return_value = ('interface Ethernet3\n'
' description test\n'
' switchport\n'
' switchport access vlan 7\n'
' no shutdown\n!')
env_mock = Mock()
env_mock.loader.get_source.return_value = ['one', 'two']
env_mock.parse.return_value = 'parsed'
env_mock.get_template.return_value = template_mock
mock_env.return_value = env_mock
mock_find.return_value = dict(server_name=None, switch_port=None,
port_vlan=None)
result = cv_server_provision.config_from_template(module)
self.assertIsNotNone(result)
expected = ('interface Ethernet3\n'
' description test\n'
' switchport\n'
' switchport access vlan 7\n'
' no shutdown\n!')
self.assertEqual(result, expected)
self.assertEqual(mock_file_sys.call_count, 1)
self.assertEqual(mock_env.call_count, 1)
module.fail_json.assert_not_called()
@patch('jinja2.meta.find_undeclared_variables')
@patch('jinja2.DebugUndefined')
@patch('jinja2.Environment')
@patch('jinja2.FileSystemLoader')
def test_config_from_template_fail_wrong_port(self, mock_file_sys, mock_env,
mock_debug, mock_find):
''' Test config_from_template fail. Wrong port number in template.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='4',
server_name='new', template='jinja.j2')
mock_file_sys.return_value = 'file'
mock_debug.return_value = 'debug'
template_mock = Mock()
template_mock.render.return_value = ('interface Ethernet3\n'
' description test\n!')
env_mock = Mock()
env_mock.loader.get_source.return_value = ['one', 'two']
env_mock.parse.return_value = 'parsed'
env_mock.get_template.return_value = template_mock
mock_env.return_value = env_mock
mock_find.return_value = dict(server_name=None, switch_port=None)
result = cv_server_provision.config_from_template(module)
self.assertIsNotNone(result)
expected = 'interface Ethernet3\n description test\n!'
self.assertEqual(result, expected)
self.assertEqual(mock_file_sys.call_count, 1)
self.assertEqual(mock_env.call_count, 1)
module.fail_json.assert_called_with(msg='Template content does not'
' configure proper interface'
' - %s' % expected)
@patch('jinja2.meta.find_undeclared_variables')
@patch('jinja2.DebugUndefined')
@patch('jinja2.Environment')
@patch('jinja2.FileSystemLoader')
def test_config_from_template_fail_no_vlan(self, mock_file_sys, mock_env,
mock_debug, mock_find):
''' Test config_from_template fail. Template needs vlan but none provided.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3',
server_name='new', template='jinja.j2',
port_vlan=None)
mock_file_sys.return_value = 'file'
mock_debug.return_value = 'debug'
template_mock = Mock()
template_mock.render.return_value = ('interface Ethernet3\n'
' description test\n!')
env_mock = Mock()
env_mock.loader.get_source.return_value = ['one', 'two']
env_mock.parse.return_value = 'parsed'
env_mock.get_template.return_value = template_mock
mock_env.return_value = env_mock
mock_find.return_value = dict(server_name=None, switch_port=None,
port_vlan=None)
result = cv_server_provision.config_from_template(module)
self.assertIsNotNone(result)
expected = 'interface Ethernet3\n description test\n!'
self.assertEqual(result, expected)
self.assertEqual(mock_file_sys.call_count, 1)
self.assertEqual(mock_env.call_count, 1)
module.fail_json.assert_called_with(msg='Template jinja.j2 requires a'
' vlan. Please re-run with vlan'
' number provided.')
def test_updated_configlet_content_add(self):
''' Test updated_configlet_content. Add config.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
existing_config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
new_config_block = 'interface Ethernet3\n description test\n!'
result = cv_server_provision.updated_configlet_content(module,
existing_config,
new_config_block)
expected = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
self.assertEqual(result, expected)
module.fail_json.assert_not_called()
def test_updated_configlet_content_remove(self):
''' Test updated_configlet_content. Remove config.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
existing_config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4')
new_config_block = 'interface Ethernet3\n!'
result = cv_server_provision.updated_configlet_content(module,
existing_config,
new_config_block)
expected = '!\ninterface Ethernet3\n!\ninterface Ethernet4'
self.assertEqual(result, expected)
module.fail_json.assert_not_called()
def test_updated_configlet_content_no_match(self):
''' Test updated_configlet_content. Interface not in config.
'''
module = Mock()
module.fail_json.side_effect = SystemExit
module.params = dict(switch_name='eos', switch_port='2')
existing_config = '!\ninterface Ethernet3\n description test\n!'
new_config_block = 'interface Ethernet3\n!'
self.assertRaises(SystemExit,
cv_server_provision.updated_configlet_content,
module, existing_config, new_config_block)
@patch('time.sleep')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
def test_configlet_update_task_good_one_try(self, mock_info, mock_sleep):
''' Test configlet_update_task gets task after one try.
'''
module = Mock()
task = dict(data=dict(WORKFLOW_ACTION='Configlet Push'),
description='Configlet Assign',
workOrderId='7')
device_info = dict(taskIdList=[task])
mock_info.return_value = device_info
result = cv_server_provision.configlet_update_task(module)
self.assertEqual(result, '7')
mock_sleep.assert_not_called()
self.assertEqual(mock_info.call_count, 1)
@patch('time.sleep')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
def test_configlet_update_task_good_three_tries(self, mock_info, mock_sleep):
''' Test configlet_update_task gets task on third try.
'''
module = Mock()
task1 = dict(data=dict(WORKFLOW_ACTION='Configlet Push'),
description='Configlet Assign',
workOrderId='7')
task2 = dict(data=dict(WORKFLOW_ACTION='Nonsense'),
description='Configlet Assign',
workOrderId='700')
device_info = dict(taskIdList=[task1, task2])
mock_info.side_effect = [dict(), dict(), device_info]
result = cv_server_provision.configlet_update_task(module)
self.assertEqual(result, '7')
self.assertEqual(mock_sleep.call_count, 2)
self.assertEqual(mock_info.call_count, 3)
@patch('time.sleep')
@patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
def test_configlet_update_task_no_task(self, mock_info, mock_sleep):
''' Test configlet_update_task does not get task after three tries.
'''
module = Mock()
mock_info.side_effect = [dict(), dict(), dict()]
result = cv_server_provision.configlet_update_task(module)
self.assertIsNone(result)
self.assertEqual(mock_sleep.call_count, 3)
self.assertEqual(mock_info.call_count, 3)
@patch('time.sleep')
def test_wait_for_task_completion_good_one_try(self, mock_time):
''' Test wait_for_task_completion completed. One Try.
'''
module = Mock()
module.client.api.get_task_by_id.return_value = dict(
workOrderUserDefinedStatus='Completed')
result = cv_server_provision.wait_for_task_completion(module, '7')
self.assertTrue(result)
self.assertEqual(module.client.api.get_task_by_id.call_count, 1)
module.fail_json.assert_not_called()
mock_time.assert_not_called()
@patch('time.sleep')
def test_wait_for_task_completion_good_three_tries(self, mock_time):
''' Test wait_for_task_completion completed. Three tries.
'''
module = Mock()
try_one_two = dict(workOrderUserDefinedStatus='Pending')
try_three = dict(workOrderUserDefinedStatus='Completed')
module.client.api.get_task_by_id.side_effect = [try_one_two,
try_one_two, try_three]
result = cv_server_provision.wait_for_task_completion(module, '7')
self.assertTrue(result)
self.assertEqual(module.client.api.get_task_by_id.call_count, 3)
module.fail_json.assert_not_called()
self.assertEqual(mock_time.call_count, 2)
@patch('time.sleep')
def test_wait_for_task_completion_fail(self, mock_time):
''' Test wait_for_task_completion failed.
'''
module = Mock()
try_one = dict(workOrderUserDefinedStatus='Failed')
try_two = dict(workOrderUserDefinedStatus='Completed')
module.client.api.get_task_by_id.side_effect = [try_one, try_two]
result = cv_server_provision.wait_for_task_completion(module, '7')
self.assertTrue(result)
self.assertEqual(module.client.api.get_task_by_id.call_count, 2)
text = ('Task 7 has reported status Failed. Please consult the CVP'
' admins for more information.')
module.fail_json.assert_called_with(msg=text)
self.assertEqual(mock_time.call_count, 1)
| gpl-3.0 |
phobson/pycvc | pycvc/validate.py | 2 | 1090 |
def groupby_col(groupby_col):
    """ Confirms that a given value is a valid time-grouping column and
    returns the all lowercase version of it (None is passed through).
    """
    valid_groups = ['season', 'grouped_season', 'year', 'storm_bin', 'samplestart']
if groupby_col is None:
return groupby_col
elif groupby_col.lower() in valid_groups:
return groupby_col.lower()
else:
raise ValueError("{} is not a valid time group ({})".format(groupby_col, valid_groups))
def sampletype(sampletype):
""" Confirms that a given value is a valid sampletype and returns
the all lowercase version of it.
"""
if sampletype.lower() not in ('grab', 'composite'):
raise ValueError("`sampletype` must be 'composite' or 'grab'")
return sampletype.lower()
def rescol(rescol):
""" Comfirms that a give value is a valid results column and returns
the corresponding units column and results column.
"""
if rescol.lower() == 'concentration':
unitscol = 'units'
elif rescol.lower() == 'load_outflow':
unitscol = 'load_units'
else:
raise ValueError("`rescol` must be in ['concentration', 'load_outflow']")
return rescol.lower(), unitscol
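# Illustrative usage sketch (values are examples only, not part of the original
# module): each validator normalizes its input or raises a ValueError.
#
#   >>> sampletype('Composite')
#   'composite'
#   >>> rescol('Concentration')
#   ('concentration', 'units')
#   >>> groupby_col('Season')
#   'season'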
| bsd-3-clause |
MayB/zulip | zerver/forms.py | 97 | 3911 | from __future__ import absolute_import
from django import forms
from django.core.exceptions import ValidationError
from django.utils.safestring import mark_safe
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm
from django.conf import settings
from zerver.models import Realm, get_user_profile_by_email, UserProfile, \
completely_open, resolve_email_to_domain, get_realm
from zerver.lib.actions import do_change_password, is_inactive
from zproject.backends import password_auth_enabled
import DNS
SIGNUP_STRING = u'Use a different e-mail address, or contact %s with questions.'%(settings.ZULIP_ADMINISTRATOR,)
def has_valid_realm(value):
# Checks if there is a realm without invite_required
# matching the domain of the input e-mail.
try:
realm = Realm.objects.get(domain=resolve_email_to_domain(value))
except Realm.DoesNotExist:
return False
return not realm.invite_required
def not_mit_mailing_list(value):
# I don't want ec-discuss signed up for Zulip
if "@mit.edu" in value:
username = value.rsplit("@", 1)[0]
# Check whether the user exists and can get mail.
try:
DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
return True
except DNS.Base.ServerError, e:
if e.rcode == DNS.Status.NXDOMAIN:
raise ValidationError(mark_safe(u'That user does not exist at MIT or is a <a href="https://ist.mit.edu/email-lists">mailing list</a>. If you want to sign up an alias for Zulip, <a href="mailto:[email protected]">contact us</a>.'))
else:
raise
return True
class RegistrationForm(forms.Form):
full_name = forms.CharField(max_length=100)
# The required-ness of the password field gets overridden if it isn't
# actually required for a realm
password = forms.CharField(widget=forms.PasswordInput, max_length=100,
required=False)
if not settings.VOYAGER:
terms = forms.BooleanField(required=True)
class ToSForm(forms.Form):
full_name = forms.CharField(max_length=100)
terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
# This form is important because it determines whether users can
# register for our product. Be careful when modifying the
# validators.
email = forms.EmailField(validators=[is_inactive,])
def __init__(self, *args, **kwargs):
self.domain = kwargs.get("domain")
if kwargs.has_key("domain"):
del kwargs["domain"]
super(HomepageForm, self).__init__(*args, **kwargs)
def clean_email(self):
data = self.cleaned_data['email']
if completely_open(self.domain) or has_valid_realm(data) and not_mit_mailing_list(data):
return data
raise ValidationError(mark_safe(
u'Your e-mail does not match any existing open organization. ' \
+ SIGNUP_STRING))
class LoggingSetPasswordForm(SetPasswordForm):
def save(self, commit=True):
do_change_password(self.user, self.cleaned_data['new_password1'],
log=True, commit=commit)
return self.user
class CreateUserForm(forms.Form):
full_name = forms.CharField(max_length=100)
email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
def clean_username(self):
email = self.cleaned_data['username']
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return email
if user_profile.realm.deactivated:
error_msg = u"""Sorry for the trouble, but %s has been deactivated.
Please contact [email protected] to reactivate this group.""" % (
user_profile.realm.name,)
raise ValidationError(mark_safe(error_msg))
return email
| apache-2.0 |
jaimahajan1997/sympy | sympy/physics/tests/test_hydrogen.py | 83 | 4122 | from sympy import exp, integrate, oo, S, simplify, sqrt, symbols
from sympy.core.compatibility import range
from sympy.physics.hydrogen import R_nl, E_nl, E_nl_dirac
from sympy.utilities.pytest import raises
n, r, Z = symbols('n r Z')
def feq(a, b, max_relative_error=1e-12, max_absolute_error=1e-12):
a = float(a)
b = float(b)
# if the numbers are close enough (absolutely), then they are equal
if abs(a - b) < max_absolute_error:
return True
# if not, they can still be equal if their relative error is small
if abs(b) > abs(a):
relative_error = abs((a - b)/b)
else:
relative_error = abs((a - b)/a)
return relative_error <= max_relative_error
def test_wavefunction():
a = 1/Z
R = {
(1, 0): 2*sqrt(1/a**3) * exp(-r/a),
(2, 0): sqrt(1/(2*a**3)) * exp(-r/(2*a)) * (1 - r/(2*a)),
(2, 1): S(1)/2 * sqrt(1/(6*a**3)) * exp(-r/(2*a)) * r/a,
(3, 0): S(2)/3 * sqrt(1/(3*a**3)) * exp(-r/(3*a)) *
(1 - 2*r/(3*a) + S(2)/27 * (r/a)**2),
(3, 1): S(4)/27 * sqrt(2/(3*a**3)) * exp(-r/(3*a)) *
(1 - r/(6*a)) * r/a,
(3, 2): S(2)/81 * sqrt(2/(15*a**3)) * exp(-r/(3*a)) * (r/a)**2,
(4, 0): S(1)/4 * sqrt(1/a**3) * exp(-r/(4*a)) *
(1 - 3*r/(4*a) + S(1)/8 * (r/a)**2 - S(1)/192 * (r/a)**3),
(4, 1): S(1)/16 * sqrt(5/(3*a**3)) * exp(-r/(4*a)) *
(1 - r/(4*a) + S(1)/80 * (r/a)**2) * (r/a),
(4, 2): S(1)/64 * sqrt(1/(5*a**3)) * exp(-r/(4*a)) *
(1 - r/(12*a)) * (r/a)**2,
(4, 3): S(1)/768 * sqrt(1/(35*a**3)) * exp(-r/(4*a)) * (r/a)**3,
}
for n, l in R:
assert simplify(R_nl(n, l, r, Z) - R[(n, l)]) == 0
def test_norm():
# Maximum "n" which is tested:
n_max = 2 # it works, but is slow, for n_max > 2
for n in range(n_max + 1):
for l in range(n):
assert integrate(R_nl(n, l, r)**2 * r**2, (r, 0, oo)) == 1
def test_hydrogen_energies():
assert E_nl(n, Z) == -Z**2/(2*n**2)
assert E_nl(n) == -1/(2*n**2)
assert E_nl(1, 47) == -S(47)**2/(2*1**2)
assert E_nl(2, 47) == -S(47)**2/(2*2**2)
assert E_nl(1) == -S(1)/(2*1**2)
assert E_nl(2) == -S(1)/(2*2**2)
assert E_nl(3) == -S(1)/(2*3**2)
assert E_nl(4) == -S(1)/(2*4**2)
assert E_nl(100) == -S(1)/(2*100**2)
raises(ValueError, lambda: E_nl(0))
def test_hydrogen_energies_relat():
# First test exact formulas for small "c" so that we get nice expressions:
assert E_nl_dirac(2, 0, Z=1, c=1) == 1/sqrt(2) - 1
assert simplify(E_nl_dirac(2, 0, Z=1, c=2) - ( (8*sqrt(3) + 16)
/ sqrt(16*sqrt(3) + 32) - 4)) == 0
assert simplify(E_nl_dirac(2, 0, Z=1, c=3) - ( (54*sqrt(2) + 81)
/ sqrt(108*sqrt(2) + 162) - 9)) == 0
# Now test for almost the correct speed of light, without floating point
# numbers:
assert simplify(E_nl_dirac(2, 0, Z=1, c=137) - ( (352275361 + 10285412 *
sqrt(1173)) / sqrt(704550722 + 20570824 * sqrt(1173)) - 18769)) == 0
assert simplify(E_nl_dirac(2, 0, Z=82, c=137) - ( (352275361 + 2571353 *
sqrt(12045)) / sqrt(704550722 + 5142706*sqrt(12045)) - 18769)) == 0
# Test using exact speed of light, and compare against the nonrelativistic
# energies:
for n in range(1, 5):
for l in range(n):
assert feq(E_nl_dirac(n, l), E_nl(n), 1e-5, 1e-5)
if l > 0:
assert feq(E_nl_dirac(n, l, False), E_nl(n), 1e-5, 1e-5)
Z = 2
for n in range(1, 5):
for l in range(n):
assert feq(E_nl_dirac(n, l, Z=Z), E_nl(n, Z), 1e-4, 1e-4)
if l > 0:
assert feq(E_nl_dirac(n, l, False, Z), E_nl(n, Z), 1e-4, 1e-4)
Z = 3
for n in range(1, 5):
for l in range(n):
assert feq(E_nl_dirac(n, l, Z=Z), E_nl(n, Z), 1e-3, 1e-3)
if l > 0:
assert feq(E_nl_dirac(n, l, False, Z), E_nl(n, Z), 1e-3, 1e-3)
# Test the exceptions:
raises(ValueError, lambda: E_nl_dirac(0, 0))
raises(ValueError, lambda: E_nl_dirac(1, -1))
raises(ValueError, lambda: E_nl_dirac(1, 0, False))
| bsd-3-clause |
Lh4cKg/kivy | kivy/lib/osc/oscAPI.py | 3 | 9354 | # pylint: disable=W0611
''' simpleOSC 0.2
ixi software - July, 2006
www.ixi-software.net
simple API for the Open SoundControl for Python (by Daniel Holth, Clinton
McChesney --> pyKit.tar.gz file at http://wiretap.stetson.edu)
Documentation at http://wiretap.stetson.edu/docs/pyKit/
The main aim of this implementation is to provide with a simple way to deal
with the OSC implementation that makes life easier to those who don't have
understanding of sockets or programming. This would not be on your screen without the help
of Daniel Holth.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Thanks for the support to Buchsenhausen, Innsbruck, Austria.
'''
import OSC
import socket, os, time, errno, sys
from threading import Lock
from kivy.logger import Logger
try:
# multiprocessing support is not good on window
if sys.platform in ('win32', 'cygwin'):
raise
use_multiprocessing = True
from multiprocessing import Process, Queue, Value
import multiprocessing.synchronize
Logger.info('OSC: using <multiprocessing> for socket')
except:
use_multiprocessing = False
from threading import Thread
Logger.info('OSC: using <thread> for socket')
# globals
outSocket = 0
oscThreads = {}
oscLock = Lock()
if use_multiprocessing:
def _readQueue(thread_id=None):
global oscThreads
for id in oscThreads:
if thread_id is not None:
if id != thread_id:
continue
thread = oscThreads[id]
try:
while True:
message = thread.queue.get_nowait()
thread.addressManager.handle(message)
except:
pass
class _OSCServer(Process):
def __init__(self, **kwargs):
self.addressManager = OSC.CallbackManager()
self.queue = Queue()
Process.__init__(self, args=(self.queue,))
self.daemon = True
self._isRunning = Value('b', True)
self._haveSocket= Value('b', False)
def _queue_message(self, message):
self.queue.put(message)
def _get_isRunning(self):
return self._isRunning.value
def _set_isRunning(self, value):
self._isRunning.value = value
isRunning = property(_get_isRunning, _set_isRunning)
def _get_haveSocket(self):
return self._haveSocket.value
def _set_haveSocket(self, value):
self._haveSocket.value = value
haveSocket = property(_get_haveSocket, _set_haveSocket)
else:
def _readQueue(thread_id=None):
pass
class _OSCServer(Thread):
def __init__(self, **kwargs):
Thread.__init__(self)
self.addressManager = OSC.CallbackManager()
self.daemon = True
self.isRunning = True
self.haveSocket = False
def _queue_message(self, message):
self.addressManager.handle(message)
def init() :
'''instantiates address manager and outsocket as globals
'''
assert('Not used anymore')
def bind(oscid, func, oscaddress):
'''bind given oscaddresses with given functions in address manager
'''
global oscThreads
thread = oscThreads.get(oscid, None)
if thread is None:
assert('Unknown thread')
thread.addressManager.add(func, oscaddress)
def sendMsg(oscAddress, dataArray=[], ipAddr='127.0.0.1', port=9000) :
'''create and send normal OSC msgs
defaults to '127.0.0.1', port 9000
'''
oscLock.acquire()
outSocket.sendto( createBinaryMsg(oscAddress, dataArray), (ipAddr, port))
oscLock.release()
def createBundle():
'''create bundled type of OSC messages
'''
b = OSC.OSCMessage()
b.address = ""
b.append("#bundle")
b.append(0)
b.append(0)
return b
def appendToBundle(bundle, oscAddress, dataArray):
'''create OSC mesage and append it to a given bundle
'''
bundle.append( createBinaryMsg(oscAddress, dataArray), 'b')
def sendBundle(bundle, ipAddr='127.0.0.1', port=9000) :
'''convert bundle to a binary and send it
'''
oscLock.acquire()
outSocket.sendto(bundle.message, (ipAddr, port))
oscLock.release()
def createBinaryMsg(oscAddress, dataArray):
'''create and return general type binary OSC msg
'''
m = OSC.OSCMessage()
m.address = oscAddress
for x in dataArray:
m.append(x)
return m.getBinary()
def readQueue(thread_id=None):
'''Read queues from all threads, and dispatch message.
This must be call in the main thread.
You can pass the thread id to deque message from a specific thread.
This id is returned from the listen() function'''
return _readQueue(thread_id)
################################ receive osc from The Other.
class OSCServer(_OSCServer):
def __init__(self, **kwargs):
kwargs.setdefault('ipAddr', '127.0.0.1')
kwargs.setdefault('port', 9001)
super(OSCServer, self).__init__()
self.ipAddr = kwargs.get('ipAddr')
self.port = kwargs.get('port')
def run(self):
self.haveSocket = False
# create socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# fix trouble if python leave without cleaning well the socket
# not needed under windows, he can reuse addr even if the socket
# are in fin2 or wait state.
if os.name in ['posix', 'mac'] and hasattr(socket, 'SO_REUSEADDR'):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# try to bind the socket, retry if necessary
while not self.haveSocket and self.isRunning:
try :
self.socket.bind((self.ipAddr, self.port))
self.socket.settimeout(0.5)
self.haveSocket = True
except socket.error, e:
error, message = e.args
# special handle for EADDRINUSE
if error == errno.EADDRINUSE:
                    Logger.error('OSC: Address %s:%i already in use, retry in 2 seconds' % (self.ipAddr, self.port))
else:
self.haveSocket = False
# sleep 2 second before retry
time.sleep(2)
Logger.info('OSC: listening for Tuio on %s:%i' % (self.ipAddr, self.port))
while self.isRunning:
try:
message = self.socket.recv(65535)
self._queue_message(message)
except Exception, e:
if type(e) == socket.timeout:
continue
Logger.exception('OSC: Error in Tuio recv()')
return 'no data arrived'
def listen(ipAddr='127.0.0.1', port=9001):
'''Creates a new thread listening to that port
defaults to ipAddr='127.0.0.1', port 9001
'''
global oscThreads
id = '%s:%d' % (ipAddr, port)
if id in oscThreads:
return
Logger.debug('OSC: Start thread <%s>' % id)
oscThreads[id] = OSCServer(ipAddr=ipAddr, port=port)
oscThreads[id].start()
return id
def dontListen(id = None):
'''closes the socket and kills the thread
'''
global oscThreads
if id and id in oscThreads:
ids = [id]
else:
ids = oscThreads.keys()
for id in ids:
#oscThreads[id].socket.close()
Logger.debug('OSC: Stop thread <%s>' % id)
oscThreads[id].isRunning = False
oscThreads[id].join()
Logger.debug('OSC: Stop thread <%s> finished' % id)
del oscThreads[id]
if __name__ == '__main__':
# example of how to use oscAPI
init()
listen() # defaults to "127.0.0.1", 9001
time.sleep(5)
# add addresses to callback manager
def printStuff(msg):
'''deals with "print" tagged OSC addresses
'''
print "printing in the printStuff function ", msg
print "the oscaddress is ", msg[0]
print "the value is ", msg[2]
bind(printStuff, "/test")
#send normal msg, two ways
sendMsg("/test", [1, 2, 3], "127.0.0.1", 9000)
sendMsg("/test2", [1, 2, 3]) # defaults to "127.0.0.1", 9000
sendMsg("/hello") # defaults to [], "127.0.0.1", 9000
# create and send bundle, to ways to send
bundle = createBundle()
appendToBundle(bundle, "/testing/bundles", [1, 2, 3])
appendToBundle(bundle, "/testing/bundles", [4, 5, 6])
sendBundle(bundle, "127.0.0.1", 9000)
sendBundle(bundle) # defaults to "127.0.0.1", 9000
dontListen() # finally close the connection bfore exiting or program
| lgpl-3.0 |
jbzdak/edx-platform | common/djangoapps/enrollment/errors.py | 84 | 1396 | """All Error Types pertaining to Enrollment."""
class CourseEnrollmentError(Exception):
"""Generic Course Enrollment Error.
Describes any error that may occur when reading or updating enrollment information for a user or a course.
"""
def __init__(self, msg, data=None):
super(CourseEnrollmentError, self).__init__(msg)
# Corresponding information to help resolve the error.
self.data = data
class CourseNotFoundError(CourseEnrollmentError):
pass
class UserNotFoundError(CourseEnrollmentError):
pass
class CourseEnrollmentClosedError(CourseEnrollmentError):
pass
class CourseEnrollmentFullError(CourseEnrollmentError):
pass
class CourseEnrollmentExistsError(CourseEnrollmentError):
enrollment = None
def __init__(self, message, enrollment):
super(CourseEnrollmentExistsError, self).__init__(message)
self.enrollment = enrollment
class CourseModeNotFoundError(CourseEnrollmentError):
"""The requested course mode could not be found."""
pass
class EnrollmentNotFoundError(CourseEnrollmentError):
"""The requested enrollment could not be found."""
pass
class EnrollmentApiLoadError(CourseEnrollmentError):
"""The data API could not be loaded."""
pass
class InvalidEnrollmentAttribute(CourseEnrollmentError):
"""Enrollment Attributes could not be validated"""
pass
| agpl-3.0 |
israelbenatar/boto | tests/integration/sdb/test_cert_verification.py | 126 | 1544 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.sdb
class SDBCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
sdb = True
regions = boto.sdb.regions()
def sample_service_call(self, conn):
conn.get_all_domains()
| mit |
endticket/uwsgi | tests/signals.py | 21 | 1027 | import uwsgi
def hello_signal(num, payload):
print "i am the signal %d" % num
def hello_signal2(num, payload):
print "i am the signal %d with payload: %s" % (num, payload)
def hello_file(num, filename):
print "file %s has been modified !!!" % filename
def hello_timer(num, secs):
print "%s seconds elapsed" % secs
# uwsgi.register_signal(30, uwsgi.SIGNAL_KIND_WORKER, hello_signal)
uwsgi.register_signal(30, "workers", hello_signal)
uwsgi.register_signal(22, "worker", hello_signal2, "*** PAYLOAD FOO ***")
uwsgi.register_file_monitor(3, "/tmp", "workers", hello_file)
uwsgi.register_timer(26, 2, "worker", hello_timer)
uwsgi.register_timer(17, 4, "worker2", hello_timer)
uwsgi.register_timer(5, 8, "worker3", hello_timer)
def application(env, start_response):
start_response('200 Ok', [('Content-Type', 'text/html')])
# this will send a signal to the master that will report it to the first available worker
uwsgi.signal(30)
uwsgi.signal(22)
return "signals sent to workers"
| gpl-2.0 |
nikitasingh981/scikit-learn | sklearn/externals/joblib/pool.py | 13 | 25147 | """Custom implementation of multiprocessing.Pool with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <[email protected]>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
import warnings
from time import sleep
try:
WindowsError
except NameError:
WindowsError = type(None)
from pickle import whichmodule
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
from .backports import make_memmap
# Some systems have a ramdisk mounted by default; we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any."""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
        # neither a nor its descendants have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not."""
return _get_backing_memmap(a) is not None
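# Quick illustration (assumes numpy and a writable temp dir; names are examples):
#
#   >>> import numpy as np, os, tempfile
#   >>> path = os.path.join(tempfile.mkdtemp(), 'x.mmap')
#   >>> a = np.memmap(path, dtype='float64', shape=(10,), mode='w+')
#   >>> has_shareable_memory(a)
#   True
#   >>> has_shareable_memory(a[2:5])        # views keep the memmap base
#   True
#   >>> has_shareable_memory(np.ones(10))   # plain in-memory array
#   False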
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len):
"""Reconstruct an array view on a memory mapped file."""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return make_memmap(filename, dtype=dtype, shape=shape, mode=mode,
offset=offset, order=order)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = make_memmap(filename, dtype=dtype, shape=total_buffer_len,
mode=mode, offset=offset, order=order)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
"""Pickling reduction for memmap backed arrays.
a is expected to be an instance of np.ndarray (or np.memmap)
m is expected to be an instance of np.memmap on the top of the ``base``
attribute ancestry of a. ``m.base`` should be the real python mmap object.
"""
# offset that comes from the striding differences between a and m
a_start, a_end = np.byte_bounds(a)
m_start = np.byte_bounds(m)[0]
offset = a_start - m_start
# offset from the backing memmap
offset += m.offset
if m.flags['F_CONTIGUOUS']:
order = 'F'
else:
# The backing memmap buffer is necessarily contiguous hence C if not
# Fortran
order = 'C'
if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
# If the array is a contiguous view, no need to pass the strides
strides = None
total_buffer_len = None
else:
# Compute the total number of items to map from which the strided
# view will be extracted.
strides = a.strides
total_buffer_len = (a_end - a_start) // a.itemsize
return (_strided_from_memmap,
(m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
total_buffer_len))
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file."""
m = _get_backing_memmap(a)
if m is not None:
# m is a real mmap backed memmap instance, reduce a preserving striding
# information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
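# Sketch of what the reduction yields (illustrative; the file path is an
# assumption): pickling a memmap through this reducer reopens the same file
# instead of copying its content.
#
#   >>> m = np.memmap('/tmp/demo.mmap', dtype='float64', shape=(5,), mode='w+')
#   >>> rebuild, args = reduce_memmap(m)
#   >>> m2 = rebuild(*args)                 # re-maps /tmp/demo.mmap ('w+' -> 'r+')
#   >>> m2.filename == m.filename
#   True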
class ArrayMemmapReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
        Threshold to trigger memmaping of large arrays to files created
        in temp_folder.
temp_folder: str
Path of a folder where files for backing memmaped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
    prewarm: bool, optional, True by default.
        Force a read on newly memmaped arrays to make sure that the OS pre-caches
        them in memory. This can be useful to avoid concurrent disk access when the
same data array is passed to different worker processes.
"""
def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
context_id=None, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder = temp_folder
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
self._prewarm = prewarm
if context_id is not None:
warnings.warn('context_id is deprecated and ignored in joblib'
' 0.9.4 and will be removed in 0.11',
DeprecationWarning)
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None:
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject
and self._max_nbytes is not None
and a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# Find a unique, concurrent safe filename for writing the
# content of this array only once.
basename = "%d-%d-%s.pkl" % (
os.getpid(), id(threading.current_thread()), hash(a))
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
# XXX: implement an explicit reference counting scheme to make it
# possible to delete temporary files as soon as the workers are
# done processing this data.
if not os.path.exists(filename):
if self.verbose > 0:
print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data to avoid concurrent disk access in
# multiple children processes
load(filename, mmap_mode=self._mmap_mode).max()
elif self.verbose > 1:
print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
a.shape, a.dtype, filename))
# The worker process will use joblib.load to memmap the data
return (load, (filename, self._mmap_mode))
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
if self.verbose > 1:
print("Pickling array (shape=%r, dtype=%s)." % (
a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
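# Behaviour sketch (illustrative; the folder name is an assumption): a reducer
# instance acts like a __reduce__ implementation for ndarrays, switching between
# in-memory pickling and memmap dumping at the max_nbytes threshold.
#
#   >>> reduce_array = ArrayMemmapReducer(max_nbytes=1e6,
#   ...                                   temp_folder='/tmp/joblib_demo',
#   ...                                   mmap_mode='r')
#   >>> reduce_array(np.zeros(10))[0] is loads          # small: plain pickle bytes
#   True
#   >>> rebuild, args = reduce_array(np.zeros(500000))  # large: dumped to a file
#   >>> rebuild is load and args[1] == 'r'
#   True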
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
"""Pickler that accepts custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
to pickle ephemeral datastructures for interprocess communication
hence no backward compatibility is required.
`reducers` is expected to be a dictionary with key/values
being `(type, callable)` pairs where `callable` is a function that
give an instance of `type` will return a tuple `(constructor,
tuple_of_objects)` to rebuild an instance out of the pickled
`tuple_of_objects` as would return a `__reduce__` method. See the
standard library documentation on pickling for more details.
"""
    # We override the pure Python pickler as it's the only way to be able to
# customize the dispatch table without side effects in Python 2.7
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
Pickler.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if hasattr(Pickler, 'dispatch'):
# Make the dispatch registry an instance level attribute instead of
# a reference to the class dictionary under Python 2
self.dispatch = Pickler.dispatch.copy()
else:
# Under Python 3 initialize the dispatch table with a copy of the
# default registry
self.dispatch_table = copyreg.dispatch_table.copy()
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
def register(self, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table."""
if hasattr(Pickler, 'dispatch'):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
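# Minimal registration sketch (the Point class and its reducer are hypothetical):
#
#   >>> class Point(object):
#   ...     def __init__(self, x, y):
#   ...         self.x, self.y = x, y
#   >>> buf = BytesIO()
#   >>> pickler = CustomizablePickler(
#   ...     buf, reducers={Point: lambda p: (Point, (p.x, p.y))})
#   >>> pickler.dump(Point(1, 2))
#   >>> loads(buf.getvalue()).x
#   1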
class CustomizablePicklingQueue(object):
"""Locked Pipe implementation that uses a customizable pickler.
This class is an alternative to the multiprocessing implementation
of SimpleQueue in order to make it possible to pass custom
pickling reducers, for instance to avoid memory copy when passing
memory mapped datastructures.
`reducers` is expected to be a dict with key / values being
`(type, callable)` pairs where `callable` is a function that, given an
instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
to rebuild an instance out of the pickled `tuple_of_objects` as would
return a `__reduce__` method.
See the standard library documentation on pickling for more details.
"""
def __init__(self, context, reducers=None):
self._reducers = reducers
self._reader, self._writer = context.Pipe(duplex=False)
self._rlock = context.Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = context.Lock()
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock,
self._reducers)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock,
self._reducers) = state
self._make_methods()
def empty(self):
return not self._reader.poll()
def _make_methods(self):
self._recv = recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._reducers:
def send(obj):
buffer = BytesIO()
CustomizablePickler(buffer, self._reducers).dump(obj)
self._writer.send_bytes(buffer.getvalue())
self._send = send
else:
self._send = send = self._writer.send
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = send
else:
wlock_acquire, wlock_release = (
self._wlock.acquire, self._wlock.release)
def put(obj):
wlock_acquire()
try:
return send(obj)
finally:
wlock_release()
self.put = put
class PicklingPool(Pool):
"""Pool implementation with customizable pickling reducers.
This is useful to control how data is shipped between processes
and makes it possible to use shared memory without useless
    copies induced by the default pickling methods of the original
objects passed as arguments to dispatch.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
`callable` is a function that, given an instance of `type`, will return a
tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
pickled `tuple_of_objects` as would return a `__reduce__` method.
See the standard library documentation about pickling for more details.
"""
def __init__(self, processes=None, forward_reducers=None,
backward_reducers=None, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
self._forward_reducers = forward_reducers
self._backward_reducers = backward_reducers
poolargs = dict(processes=processes)
poolargs.update(kwargs)
super(PicklingPool, self).__init__(**poolargs)
def _setup_queues(self):
context = getattr(self, '_ctx', mp)
self._inqueue = CustomizablePicklingQueue(context,
self._forward_reducers)
self._outqueue = CustomizablePicklingQueue(context,
self._backward_reducers)
self._quick_put = self._inqueue._send
self._quick_get = self._outqueue._recv
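# Usage sketch (illustrative): reducers are passed at construction time and the
# pool is then used like a regular multiprocessing.Pool.
#
#   >>> pool = PicklingPool(processes=2,
#   ...                     forward_reducers={np.memmap: reduce_memmap})
#   >>> pool.map(sum, [range(10), range(20)])
#   [45, 190]
#   >>> pool.terminate()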
def delete_folder(folder_path):
"""Utility function to cleanup a temporary folder if still existing."""
try:
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
except WindowsError:
warnings.warn("Failed to clean temporary folder: %s" % folder_path)
class MemmapingPool(PicklingPool):
"""Process pool that shares large arrays to avoid memory copy.
This drop-in replacement for `multiprocessing.pool.Pool` makes
it possible to work efficiently with shared memory in a numpy
context.
Existing instances of numpy.memmap are preserved: the child
    subprocesses will have access to the same shared memory in the
original mode except for the 'w+' mode that is automatically
transformed as 'r+' to avoid zeroing the original data upon
instantiation.
Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem so that child
    processes can access their content via memmaping (file system
backed shared memory).
Note: it is important to call the terminate method to collect
the temporary folder used by the pool.
Parameters
----------
processes: int, optional
Number of worker processes running concurrently in the pool.
initializer: callable, optional
Callable executed on worker process creation.
initargs: tuple, optional
Arguments passed to the initializer callable.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
    max_nbytes: int or None, optional, 1e6 by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder.
Use None to disable memmaping of large arrays.
mmap_mode: {'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
forward_reducers: dictionary, optional
Reducers used to pickle objects passed from master to worker
processes: see below.
backward_reducers: dictionary, optional
Reducers used to pickle return values from workers back to the
master process.
verbose: int, optional
Make it possible to monitor how the communication of numpy arrays
with the subprocess is handled (pickling or memmaping)
prewarm: bool or str, optional, "auto" by default.
        If True, force a read on newly memmaped arrays to make sure that the OS
        pre-caches them in memory. This can be useful to avoid concurrent disk access
when the same data array is passed to different worker processes.
If "auto" (by default), prewarm is set to True, unless the Linux shared
memory partition /dev/shm is available and used as temp_folder.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, will return
a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
of the pickled `tuple_of_objects` as would return a `__reduce__`
method. See the standard library documentation on pickling for more
details.
"""
def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
mmap_mode='r', forward_reducers=None, backward_reducers=None,
verbose=0, context_id=None, prewarm=False, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
if context_id is not None:
warnings.warn('context_id is deprecated and ignored in joblib'
' 0.9.4 and will be removed in 0.11',
DeprecationWarning)
# Prepare a sub-folder name for the serialization of this particular
# pool instance (do not create in advance to spare FS write access if
# no array is to be dumped):
use_shared_mem = False
pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
os.getpid(), id(self))
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except IOError:
# Missing rights in the /dev/shm partition,
# fallback to regular temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
self._temp_folder = pool_folder
# Register the garbage collector at program exit in case caller forgets
# to call terminate explicitly: note we do not pass any reference to
# self to ensure that this callback won't prevent garbage collection of
# the pool instance and related file handler resources such as POSIX
# semaphores and pipes
pool_module_name = whichmodule(delete_folder, 'delete_folder')
def _cleanup():
# In some cases the Python runtime seems to set delete_folder to
# None just before exiting when accessing the delete_folder
# function from the closure namespace. So instead we reimport
# the delete_folder function explicitly.
# https://github.com/joblib/joblib/issues/328
# We cannot just use 'from joblib.pool import delete_folder'
# because joblib should only use relative imports to allow
# easy vendoring.
delete_folder = __import__(
pool_module_name, fromlist=['delete_folder']).delete_folder
delete_folder(pool_folder)
atexit.register(_cleanup)
if np is not None:
# Register smart numpy.ndarray reducers that detect memmap backed
# arrays and are also able to dump large in-memory arrays over
# the max_nbytes threshold to memmap
if prewarm == "auto":
prewarm = not use_shared_mem
forward_reduce_ndarray = ArrayMemmapReducer(
max_nbytes, pool_folder, mmap_mode, verbose,
prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = reduce_memmap
# Communication from child process to the parent process always
# pickles in-memory numpy.ndarray without dumping them as memmap
# to avoid confusing the caller and making it tricky to collect the
# temporary folder
backward_reduce_ndarray = ArrayMemmapReducer(
None, pool_folder, mmap_mode, verbose)
backward_reducers[np.ndarray] = backward_reduce_ndarray
backward_reducers[np.memmap] = reduce_memmap
poolargs = dict(
processes=processes,
forward_reducers=forward_reducers,
backward_reducers=backward_reducers)
poolargs.update(kwargs)
super(MemmapingPool, self).__init__(**poolargs)
def terminate(self):
n_retries = 10
for i in range(n_retries):
try:
super(MemmapingPool, self).terminate()
break
except OSError as e:
if isinstance(e, WindowsError):
# Workaround occasional "[Error 5] Access is denied" issue
# when trying to terminate a process under windows.
sleep(0.1)
if i + 1 == n_retries:
warnings.warn("Failed to terminate worker processes in"
" multiprocessing pool: %r" % e)
delete_folder(self._temp_folder)
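# A minimal usage sketch, kept as a comment: the array size, worker function
# and pool parameters below are illustrative assumptions, not fixed by this
# module.
#
#   import numpy as np
#   big = np.random.rand(int(1e6))                 # larger than max_nbytes
#   pool = MemmapingPool(processes=2, max_nbytes=1e5, mmap_mode='r')
#   try:
#       sums = pool.map(np.sum, [big] * 4)         # workers read a memmaped dump
#   finally:
#       pool.terminate()                           # also cleans the temp folder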
| bsd-3-clause |
sekikn/incubator-airflow | airflow/providers/google/suite/operators/sheets.py | 7 | 3600 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
from airflow.utils.decorators import apply_defaults
class GoogleSheetsCreateSpreadsheetOperator(BaseOperator):
"""
Creates a new spreadsheet.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSheetsCreateSpreadsheetOperator`
:param spreadsheet: an instance of Spreadsheet
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
:type spreadsheet: Dict[str, Any]
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
"spreadsheet",
"impersonation_chain",
]
@apply_defaults
def __init__(
self,
*,
spreadsheet: Dict[str, Any],
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.spreadsheet = spreadsheet
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: Any) -> Dict[str, Any]:
hook = GSheetsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
spreadsheet = hook.create_spreadsheet(spreadsheet=self.spreadsheet)
self.xcom_push(context, "spreadsheet_id", spreadsheet["spreadsheetId"])
self.xcom_push(context, "spreadsheet_url", spreadsheet["spreadsheetUrl"])
return spreadsheet
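# A minimal usage sketch, kept as a comment; the dag_id, dates and spreadsheet
# payload are illustrative assumptions, not part of this provider module.
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG(dag_id="example_sheets", start_date=datetime(2021, 1, 1),
#            schedule_interval=None) as dag:
#       create_sheet = GoogleSheetsCreateSpreadsheetOperator(
#           task_id="create_spreadsheet",
#           spreadsheet={"properties": {"title": "Test spreadsheet"}},
#       )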
| apache-2.0 |
nojhan/weboob-devel | modules/trictractv/video.py | 7 | 1032 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.video import BaseVideo
class TricTracTVVideo(BaseVideo):
def __init__(self, *args, **kwargs):
BaseVideo.__init__(self, *args, **kwargs)
self.ext = u'flv'
@classmethod
def id2url(cls, _id):
return 'http://www.trictrac.tv/video-%s' % _id
| agpl-3.0 |
LeeEunhyeong/test_camera | cameratest01.py | 1 | 2279 | import sys, time, traceback
# Simple audio encoder
def recodeAudio( fName, fOutput, type, bitrate= None ):
# ------------------------------------
import pymedia.audio.acodec as acodec
import pymedia.muxer as muxer
# Open demuxer
dm= muxer.Demuxer( fName.split( '.' )[ -1 ].lower() )
f= open( fName, 'rb' )
s= f.read( 90000 )
dec= enc= mx= fw= None  # initialize fw too, so the final flush below cannot hit an undefined name
print 'Recoding %s into %s' % ( fName, fOutput )
while len( s ):
frames= dm.parse( s )
if frames:
for fr in frames:
# Assume for now only audio streams
if dec== None:
# Open decoder
dec= acodec.Decoder( dm.streams[ fr[ 0 ] ] )
print 'Decoder params:', dm.streams[ fr[ 0 ] ]
# Decode audio frame
r= dec.decode( fr[ 1 ] )
if r:
if bitrate== None:
bitrate= r.bitrate
# Open muxer and encoder
if enc== None:
params= { 'id': acodec.getCodecID(type),
'bitrate': bitrate,
'sample_rate': r.sample_rate,
'channels': r.channels }
print 'Encoder params:', params
mx= muxer.Muxer( type )
stId= mx.addStream( muxer.CODEC_TYPE_AUDIO, params )
enc= acodec.Encoder( params )
fw= open(fOutput, 'wb')
ss= mx.start()
fw.write(ss)
enc_frames= enc.encode( r.data )
if enc_frames:
for efr in enc_frames:
ss= mx.write( stId, efr )
if ss:
fw.write(ss)
s= f.read( 100000 )
f.close()
if fw:
if mx:
ss= mx.end()
if ss:
fw.write(ss)
fw.close()
# ----------------------------------------------------------------------------------
# Change the format of your compressed audio files to something different
# http://pymedia.org/
if __name__== '__main__':
if len( sys.argv )< 4 or len( sys.argv )> 5:
print "Usage: recode_audio.py <audio_input_file> <audio_output_file> <format_name> [ <bitrate> ]"
else:
if len( sys.argv )== 4:
recodeAudio( sys.argv[1], sys.argv[2], sys.argv[3] )
else:
recodeAudio( sys.argv[1], sys.argv[2], sys.argv[3], int( sys.argv[4] )* 1000 )
| mit |
ewpatton/appinventor-sources | appinventor/misc/emulator-support/aiStarter.py | 8 | 5821 | #!/usr/bin/python
# -*- coding: utf-8; fill-column: 120 -*-
import os
import platform
import re
import subprocess
import sys
import config
from bottle import run, route, response
VERSION = '%d.%d.%d%s' % (config.ANDROID_PLATFORM, config.COMPANION_VERSION, config.MINOR_VERSION, config.BUILD_EXTRAS)
PLATDIR = os.path.abspath(os.path.dirname(sys.argv[0]))
# Path to executables
ADB = os.path.join(PLATDIR, 'from-Android-SDK', 'platform-tools', 'adb')
RUN_EMULATOR = os.path.join(PLATDIR, 'run-emulator')
RESET_EMULATOR = os.path.join(PLATDIR, 'reset-emulator')
KILL_EMULATOR = os.path.join(PLATDIR, 'kill-emulator')
@route('/ping/')
def ping():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
return {
"status": "OK",
"version": VERSION
}
@route('/utest/')
def utest():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
device = checkrunning(False)
if device:
return {
"status": "OK",
"device": device,
"version": VERSION
}
else:
return {
"status": "NO",
"version": VERSION
}
@route('/start/')
def start():
subprocess.call(RUN_EMULATOR, shell=True)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
return ''
@route('/emulatorreset/')
def emulatorreset():
subprocess.call(RESET_EMULATOR, shell=True)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
return ''
@route('/echeck/')
def echeck():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
device = checkrunning(True)
if device:
return {
"status": "OK",
"device": device,
"version": VERSION
}
else:
return {
"status": "NO",
"version": VERSION
}
@route('/ucheck/')
def ucheck():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
device = checkrunning(False)
if device:
return {
"status": "OK",
"device": device,
"version": VERSION
}
else:
return {
"status": "NO",
"version": VERSION
}
@route('/reset/')
def reset():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
shutdown()
return {
"status": "OK",
"version": VERSION
}
@route('/replstart/:device')
def replstart(device=None):
print('Device =', device)
try:
subprocess.check_output('"%s" -s %s forward tcp:8001 tcp:8001' % (ADB, device), shell=True)
if re.match('emulator.*', device): # Only fake the menu key for the emulator
subprocess.check_output('"%s" -s %s shell input keyevent 82' % (ADB, device), shell=True)
subprocess.check_output(
'"%s" -s %s shell am start -a android.intent.action.VIEW -n edu.mit.appinventor.aicompanion3/.Screen1 --ez rundirect true' % (ADB, device),
shell=True)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
return ''
except subprocess.CalledProcessError as e:
print('Problem starting companion app : status', e.returncode)
return ''
def checkrunning(emulator):
try:
match = None
result = subprocess.check_output('"%s" devices' % ADB, shell=True)
lines = result.splitlines()
for line in lines[1:]:
line = str(line, 'utf-8')
if line:
if emulator:
match = re.search(r'^(emulator-\d+)\s+device$', line)
else:
if re.search(r'^(emulator-\d+)\s+device$', line): # We are emulator
continue # Skip it
match = re.search(r'^([\w\d]+)\s+device$', line)
if match:
break
if match:
return match.group(1)
return False
except subprocess.CalledProcessError as e:
print('Problem checking for devices : status', e.returncode)
return False
def killadb():
try:
subprocess.check_output('"%s" kill-server' % ADB, shell=True)
print('Killed adb')
except subprocess.CalledProcessError as e:
print('Problem stopping adb : status', e.returncode)
def killemulator():
try:
subprocess.check_output('"%s"' % KILL_EMULATOR, shell=True)
print('Killed emulator')
except subprocess.CalledProcessError as e:
print('Problem stopping emulator : status', e.returncode)
def shutdown():
try:
killemulator()
killadb()
except:
pass
if __name__ == '__main__':
print('App Inventor version:', VERSION, '\n')
print('Architecture:', platform.machine(), '\n')
print('AppInventor tools located here:', PLATDIR, '\n')
print('ADB path:', ADB)
import atexit
atexit.register(shutdown)
run(host='127.0.0.1', port=8004)
| apache-2.0 |
kuke/models | fluid/PaddleCV/face_detection/image_util.py | 1 | 19212 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image, ImageEnhance, ImageDraw
from PIL import ImageFile
import numpy as np
import random
import math
import cv2
ImageFile.LOAD_TRUNCATED_IMAGES = True # otherwise an IOError is raised when an image file is truncated
class sampler():
def __init__(self,
max_sample,
max_trial,
min_scale,
max_scale,
min_aspect_ratio,
max_aspect_ratio,
min_jaccard_overlap,
max_jaccard_overlap,
min_object_coverage,
max_object_coverage,
use_square=False):
self.max_sample = max_sample
self.max_trial = max_trial
self.min_scale = min_scale
self.max_scale = max_scale
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
self.min_jaccard_overlap = min_jaccard_overlap
self.max_jaccard_overlap = max_jaccard_overlap
self.min_object_coverage = min_object_coverage
self.max_object_coverage = max_object_coverage
self.use_square = use_square
class bbox():
def __init__(self, xmin, ymin, xmax, ymax):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def intersect_bbox(bbox1, bbox2):
if bbox2.xmin > bbox1.xmax or bbox2.xmax < bbox1.xmin or \
bbox2.ymin > bbox1.ymax or bbox2.ymax < bbox1.ymin:
intersection_box = bbox(0.0, 0.0, 0.0, 0.0)
else:
intersection_box = bbox(
max(bbox1.xmin, bbox2.xmin),
max(bbox1.ymin, bbox2.ymin),
min(bbox1.xmax, bbox2.xmax), min(bbox1.ymax, bbox2.ymax))
return intersection_box
def bbox_coverage(bbox1, bbox2):
inter_box = intersect_bbox(bbox1, bbox2)
intersect_size = bbox_area(inter_box)
if intersect_size > 0:
bbox1_size = bbox_area(bbox1)
return intersect_size / bbox1_size
else:
return 0.
def bbox_area(src_bbox):
if src_bbox.xmax < src_bbox.xmin or src_bbox.ymax < src_bbox.ymin:
return 0.
else:
width = src_bbox.xmax - src_bbox.xmin
height = src_bbox.ymax - src_bbox.ymin
return width * height
def generate_sample(sampler, image_width, image_height):
scale = np.random.uniform(sampler.min_scale, sampler.max_scale)
aspect_ratio = np.random.uniform(sampler.min_aspect_ratio,
sampler.max_aspect_ratio)
aspect_ratio = max(aspect_ratio, (scale**2.0))
aspect_ratio = min(aspect_ratio, 1 / (scale**2.0))
bbox_width = scale * (aspect_ratio**0.5)
bbox_height = scale / (aspect_ratio**0.5)
# guarantee a square image patch after cropping
if sampler.use_square:
if image_height < image_width:
bbox_width = bbox_height * image_height / image_width
else:
bbox_height = bbox_width * image_width / image_height
xmin_bound = 1 - bbox_width
ymin_bound = 1 - bbox_height
xmin = np.random.uniform(0, xmin_bound)
ymin = np.random.uniform(0, ymin_bound)
xmax = xmin + bbox_width
ymax = ymin + bbox_height
sampled_bbox = bbox(xmin, ymin, xmax, ymax)
return sampled_bbox
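# Note on the clamping in generate_sample above: forcing
# scale**2 <= aspect_ratio <= 1/scale**2 keeps bbox_width = scale*sqrt(aspect_ratio)
# and bbox_height = scale/sqrt(aspect_ratio) both at most 1 (for scale <= 1),
# so the sampled patch always fits inside the normalized image.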
def data_anchor_sampling(sampler, bbox_labels, image_width, image_height,
scale_array, resize_width, resize_height):
num_gt = len(bbox_labels)
# np.random.randint range: [low, high)
rand_idx = np.random.randint(0, num_gt) if num_gt != 0 else 0
if num_gt != 0:
norm_xmin = bbox_labels[rand_idx][1]
norm_ymin = bbox_labels[rand_idx][2]
norm_xmax = bbox_labels[rand_idx][3]
norm_ymax = bbox_labels[rand_idx][4]
xmin = norm_xmin * image_width
ymin = norm_ymin * image_height
wid = image_width * (norm_xmax - norm_xmin)
hei = image_height * (norm_ymax - norm_ymin)
range_size = 0
area = wid * hei
for scale_ind in range(0, len(scale_array) - 1):
if area > scale_array[scale_ind] ** 2 and area < \
scale_array[scale_ind + 1] ** 2:
range_size = scale_ind + 1
break
if area > scale_array[len(scale_array) - 2]**2:
range_size = len(scale_array) - 2
scale_choose = 0.0
if range_size == 0:
rand_idx_size = 0
else:
# np.random.randint range: [low, high)
rng_rand_size = np.random.randint(0, range_size + 1)
rand_idx_size = rng_rand_size % (range_size + 1)
if rand_idx_size == range_size:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = min(2.0 * scale_array[rand_idx_size],
2 * math.sqrt(wid * hei))
scale_choose = random.uniform(min_resize_val, max_resize_val)
else:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = 2.0 * scale_array[rand_idx_size]
scale_choose = random.uniform(min_resize_val, max_resize_val)
sample_bbox_size = wid * resize_width / scale_choose
w_off_orig = 0.0
h_off_orig = 0.0
if sample_bbox_size < max(image_height, image_width):
if wid <= sample_bbox_size:
w_off_orig = np.random.uniform(xmin + wid - sample_bbox_size,
xmin)
else:
w_off_orig = np.random.uniform(xmin,
xmin + wid - sample_bbox_size)
if hei <= sample_bbox_size:
h_off_orig = np.random.uniform(ymin + hei - sample_bbox_size,
ymin)
else:
h_off_orig = np.random.uniform(ymin,
ymin + hei - sample_bbox_size)
else:
w_off_orig = np.random.uniform(image_width - sample_bbox_size, 0.0)
h_off_orig = np.random.uniform(image_height - sample_bbox_size, 0.0)
w_off_orig = math.floor(w_off_orig)
h_off_orig = math.floor(h_off_orig)
# Figure out top left coordinates.
w_off = 0.0
h_off = 0.0
w_off = float(w_off_orig / image_width)
h_off = float(h_off_orig / image_height)
sampled_bbox = bbox(w_off, h_off,
w_off + float(sample_bbox_size / image_width),
h_off + float(sample_bbox_size / image_height))
return sampled_bbox
else:
return 0
def jaccard_overlap(sample_bbox, object_bbox):
if sample_bbox.xmin >= object_bbox.xmax or \
sample_bbox.xmax <= object_bbox.xmin or \
sample_bbox.ymin >= object_bbox.ymax or \
sample_bbox.ymax <= object_bbox.ymin:
return 0
intersect_xmin = max(sample_bbox.xmin, object_bbox.xmin)
intersect_ymin = max(sample_bbox.ymin, object_bbox.ymin)
intersect_xmax = min(sample_bbox.xmax, object_bbox.xmax)
intersect_ymax = min(sample_bbox.ymax, object_bbox.ymax)
intersect_size = (intersect_xmax - intersect_xmin) * (
intersect_ymax - intersect_ymin)
sample_bbox_size = bbox_area(sample_bbox)
object_bbox_size = bbox_area(object_bbox)
overlap = intersect_size / (
sample_bbox_size + object_bbox_size - intersect_size)
return overlap
def satisfy_sample_constraint(sampler, sample_bbox, bbox_labels):
if sampler.min_jaccard_overlap == 0 and sampler.max_jaccard_overlap == 0:
has_jaccard_overlap = False
else:
has_jaccard_overlap = True
if sampler.min_object_coverage == 0 and sampler.max_object_coverage == 0:
has_object_coverage = False
else:
has_object_coverage = True
if not has_jaccard_overlap and not has_object_coverage:
return True
found = False
for i in range(len(bbox_labels)):
object_bbox = bbox(bbox_labels[i][1], bbox_labels[i][2],
bbox_labels[i][3], bbox_labels[i][4])
if has_jaccard_overlap:
overlap = jaccard_overlap(sample_bbox, object_bbox)
if sampler.min_jaccard_overlap != 0 and \
overlap < sampler.min_jaccard_overlap:
continue
if sampler.max_jaccard_overlap != 0 and \
overlap > sampler.max_jaccard_overlap:
continue
found = True
if has_object_coverage:
object_coverage = bbox_coverage(object_bbox, sample_bbox)
if sampler.min_object_coverage != 0 and \
object_coverage < sampler.min_object_coverage:
continue
if sampler.max_object_coverage != 0 and \
object_coverage > sampler.max_object_coverage:
continue
found = True
if found:
return True
return found
def generate_batch_samples(batch_sampler, bbox_labels, image_width,
image_height):
sampled_bbox = []
for sampler in batch_sampler:
found = 0
for i in range(sampler.max_trial):
if found >= sampler.max_sample:
break
sample_bbox = generate_sample(sampler, image_width, image_height)
if satisfy_sample_constraint(sampler, sample_bbox, bbox_labels):
sampled_bbox.append(sample_bbox)
found = found + 1
return sampled_bbox
def generate_batch_random_samples(batch_sampler, bbox_labels, image_width,
image_height, scale_array, resize_width,
resize_height):
sampled_bbox = []
for sampler in batch_sampler:
found = 0
for i in range(sampler.max_trial):
if found >= sampler.max_sample:
break
sample_bbox = data_anchor_sampling(
sampler, bbox_labels, image_width, image_height, scale_array,
resize_width, resize_height)
if sample_bbox == 0:
break
if satisfy_sample_constraint(sampler, sample_bbox, bbox_labels):
sampled_bbox.append(sample_bbox)
found = found + 1
return sampled_bbox
def clip_bbox(src_bbox):
src_bbox.xmin = max(min(src_bbox.xmin, 1.0), 0.0)
src_bbox.ymin = max(min(src_bbox.ymin, 1.0), 0.0)
src_bbox.xmax = max(min(src_bbox.xmax, 1.0), 0.0)
src_bbox.ymax = max(min(src_bbox.ymax, 1.0), 0.0)
return src_bbox
def meet_emit_constraint(src_bbox, sample_bbox):
center_x = (src_bbox.xmax + src_bbox.xmin) / 2
center_y = (src_bbox.ymax + src_bbox.ymin) / 2
if center_x >= sample_bbox.xmin and \
center_x <= sample_bbox.xmax and \
center_y >= sample_bbox.ymin and \
center_y <= sample_bbox.ymax:
return True
return False
def project_bbox(object_bbox, sample_bbox):
if object_bbox.xmin >= sample_bbox.xmax or \
object_bbox.xmax <= sample_bbox.xmin or \
object_bbox.ymin >= sample_bbox.ymax or \
object_bbox.ymax <= sample_bbox.ymin:
return False
else:
proj_bbox = bbox(0, 0, 0, 0)
sample_width = sample_bbox.xmax - sample_bbox.xmin
sample_height = sample_bbox.ymax - sample_bbox.ymin
proj_bbox.xmin = (object_bbox.xmin - sample_bbox.xmin) / sample_width
proj_bbox.ymin = (object_bbox.ymin - sample_bbox.ymin) / sample_height
proj_bbox.xmax = (object_bbox.xmax - sample_bbox.xmin) / sample_width
proj_bbox.ymax = (object_bbox.ymax - sample_bbox.ymin) / sample_height
proj_bbox = clip_bbox(proj_bbox)
if bbox_area(proj_bbox) > 0:
return proj_bbox
else:
return False
def transform_labels(bbox_labels, sample_bbox):
sample_labels = []
for i in range(len(bbox_labels)):
sample_label = []
object_bbox = bbox(bbox_labels[i][1], bbox_labels[i][2],
bbox_labels[i][3], bbox_labels[i][4])
if not meet_emit_constraint(object_bbox, sample_bbox):
continue
proj_bbox = project_bbox(object_bbox, sample_bbox)
if proj_bbox:
sample_label.append(bbox_labels[i][0])
sample_label.append(float(proj_bbox.xmin))
sample_label.append(float(proj_bbox.ymin))
sample_label.append(float(proj_bbox.xmax))
sample_label.append(float(proj_bbox.ymax))
sample_label = sample_label + bbox_labels[i][5:]
sample_labels.append(sample_label)
return sample_labels
def transform_labels_sampling(bbox_labels, sample_bbox, resize_val,
min_face_size):
sample_labels = []
for i in range(len(bbox_labels)):
sample_label = []
object_bbox = bbox(bbox_labels[i][1], bbox_labels[i][2],
bbox_labels[i][3], bbox_labels[i][4])
if not meet_emit_constraint(object_bbox, sample_bbox):
continue
proj_bbox = project_bbox(object_bbox, sample_bbox)
if proj_bbox:
real_width = float((proj_bbox.xmax - proj_bbox.xmin) * resize_val)
real_height = float((proj_bbox.ymax - proj_bbox.ymin) * resize_val)
if real_width * real_height < float(min_face_size * min_face_size):
continue
else:
sample_label.append(bbox_labels[i][0])
sample_label.append(float(proj_bbox.xmin))
sample_label.append(float(proj_bbox.ymin))
sample_label.append(float(proj_bbox.xmax))
sample_label.append(float(proj_bbox.ymax))
sample_label = sample_label + bbox_labels[i][5:]
sample_labels.append(sample_label)
return sample_labels
def crop_image(img, bbox_labels, sample_bbox, image_width, image_height,
resize_width, resize_height, min_face_size):
sample_bbox = clip_bbox(sample_bbox)
xmin = int(sample_bbox.xmin * image_width)
xmax = int(sample_bbox.xmax * image_width)
ymin = int(sample_bbox.ymin * image_height)
ymax = int(sample_bbox.ymax * image_height)
sample_img = img[ymin:ymax, xmin:xmax]
resize_val = resize_width
sample_labels = transform_labels_sampling(bbox_labels, sample_bbox,
resize_val, min_face_size)
return sample_img, sample_labels
def crop_image_sampling(img, bbox_labels, sample_bbox, image_width,
image_height, resize_width, resize_height,
min_face_size):
# no clipping here
xmin = int(sample_bbox.xmin * image_width)
xmax = int(sample_bbox.xmax * image_width)
ymin = int(sample_bbox.ymin * image_height)
ymax = int(sample_bbox.ymax * image_height)
w_off = xmin
h_off = ymin
width = xmax - xmin
height = ymax - ymin
cross_xmin = max(0.0, float(w_off))
cross_ymin = max(0.0, float(h_off))
cross_xmax = min(float(w_off + width - 1.0), float(image_width))
cross_ymax = min(float(h_off + height - 1.0), float(image_height))
cross_width = cross_xmax - cross_xmin
cross_height = cross_ymax - cross_ymin
roi_xmin = 0 if w_off >= 0 else abs(w_off)
roi_ymin = 0 if h_off >= 0 else abs(h_off)
roi_width = cross_width
roi_height = cross_height
roi_y1 = int(roi_ymin)
roi_y2 = int(roi_ymin + roi_height)
roi_x1 = int(roi_xmin)
roi_x2 = int(roi_xmin + roi_width)
cross_y1 = int(cross_ymin)
cross_y2 = int(cross_ymin + cross_height)
cross_x1 = int(cross_xmin)
cross_x2 = int(cross_xmin + cross_width)
sample_img = np.zeros((height, width, 3))
sample_img[roi_y1 : roi_y2, roi_x1 : roi_x2] = \
img[cross_y1 : cross_y2, cross_x1 : cross_x2]
sample_img = cv2.resize(
sample_img, (resize_width, resize_height), interpolation=cv2.INTER_AREA)
resize_val = resize_width
sample_labels = transform_labels_sampling(bbox_labels, sample_bbox,
resize_val, min_face_size)
return sample_img, sample_labels
def random_brightness(img, settings):
prob = np.random.uniform(0, 1)
if prob < settings.brightness_prob:
delta = np.random.uniform(-settings.brightness_delta,
settings.brightness_delta) + 1
img = ImageEnhance.Brightness(img).enhance(delta)
return img
def random_contrast(img, settings):
prob = np.random.uniform(0, 1)
if prob < settings.contrast_prob:
delta = np.random.uniform(-settings.contrast_delta,
settings.contrast_delta) + 1
img = ImageEnhance.Contrast(img).enhance(delta)
return img
def random_saturation(img, settings):
prob = np.random.uniform(0, 1)
if prob < settings.saturation_prob:
delta = np.random.uniform(-settings.saturation_delta,
settings.saturation_delta) + 1
img = ImageEnhance.Color(img).enhance(delta)
return img
def random_hue(img, settings):
prob = np.random.uniform(0, 1)
if prob < settings.hue_prob:
delta = np.random.uniform(-settings.hue_delta, settings.hue_delta)
img_hsv = np.array(img.convert('HSV'))
img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
img = Image.fromarray(img_hsv, mode='HSV').convert('RGB')
return img
def distort_image(img, settings):
prob = np.random.uniform(0, 1)
# Apply the distortions in one of two randomly chosen orders
if prob > 0.5:
img = random_brightness(img, settings)
img = random_contrast(img, settings)
img = random_saturation(img, settings)
img = random_hue(img, settings)
else:
img = random_brightness(img, settings)
img = random_saturation(img, settings)
img = random_hue(img, settings)
img = random_contrast(img, settings)
return img
def expand_image(img, bbox_labels, img_width, img_height, settings):
prob = np.random.uniform(0, 1)
if prob < settings.expand_prob:
if settings.expand_max_ratio - 1 >= 0.01:
expand_ratio = np.random.uniform(1, settings.expand_max_ratio)
height = int(img_height * expand_ratio)
width = int(img_width * expand_ratio)
h_off = math.floor(np.random.uniform(0, height - img_height))
w_off = math.floor(np.random.uniform(0, width - img_width))
expand_bbox = bbox(-w_off / img_width, -h_off / img_height,
(width - w_off) / img_width,
(height - h_off) / img_height)
expand_img = np.ones((height, width, 3))
expand_img = np.uint8(expand_img * np.squeeze(settings.img_mean))
expand_img = Image.fromarray(expand_img)
expand_img.paste(img, (int(w_off), int(h_off)))
bbox_labels = transform_labels(bbox_labels, expand_bbox)
return expand_img, bbox_labels, width, height
return img, bbox_labels, img_width, img_height
| apache-2.0 |
Eduardo2505/Ave | subir/server/gae-python/main.py | 77 | 7577 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from google.appengine.api import memcache, images
import json
import os
import re
import urllib
import webapp2
DEBUG=os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
WEBSITE = 'https://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
# Max file size is memcache limit (1MB) minus key size minus overhead:
MAX_FILE_SIZE = 999000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMB_MAX_WIDTH = 80
THUMB_MAX_HEIGHT = 80
THUMB_SUFFIX = '.'+str(THUMB_MAX_WIDTH)+'x'+str(THUMB_MAX_HEIGHT)+'.png'
EXPIRATION_TIME = 300 # seconds
# If set to None, only allow redirects to the referer protocol+host.
# Set to a regexp for custom pattern matching against the redirect value:
REDIRECT_ALLOW_TARGET = None
class CORSHandler(webapp2.RequestHandler):
def cors(self):
headers = self.response.headers
headers['Access-Control-Allow-Origin'] = '*'
headers['Access-Control-Allow-Methods'] =\
'OPTIONS, HEAD, GET, POST, DELETE'
headers['Access-Control-Allow-Headers'] =\
'Content-Type, Content-Range, Content-Disposition'
def initialize(self, request, response):
super(CORSHandler, self).initialize(request, response)
self.cors()
def json_stringify(self, obj):
return json.dumps(obj, separators=(',', ':'))
def options(self, *args, **kwargs):
pass
class UploadHandler(CORSHandler):
def validate(self, file):
if file['size'] < MIN_FILE_SIZE:
file['error'] = 'File is too small'
elif file['size'] > MAX_FILE_SIZE:
file['error'] = 'File is too big'
elif not ACCEPT_FILE_TYPES.match(file['type']):
file['error'] = 'Filetype not allowed'
else:
return True
return False
def validate_redirect(self, redirect):
if redirect:
if REDIRECT_ALLOW_TARGET:
return REDIRECT_ALLOW_TARGET.match(redirect)
referer = self.request.headers['referer']
if referer:
from urlparse import urlparse
parts = urlparse(referer)
redirect_allow_target = '^' + re.escape(
parts.scheme + '://' + parts.netloc + '/'
)
return re.match(redirect_allow_target, redirect)
return False
def get_file_size(self, file):
file.seek(0, 2) # Seek to the end of the file
size = file.tell() # Get the position of EOF
file.seek(0) # Reset the file position to the beginning
return size
def write_blob(self, data, info):
key = urllib.quote(info['type'].encode('utf-8'), '') +\
'/' + str(hash(data)) +\
'/' + urllib.quote(info['name'].encode('utf-8'), '')
try:
memcache.set(key, data, time=EXPIRATION_TIME)
except: #Failed to add to memcache
return (None, None)
thumbnail_key = None
if IMAGE_TYPES.match(info['type']):
try:
img = images.Image(image_data=data)
img.resize(
width=THUMB_MAX_WIDTH,
height=THUMB_MAX_HEIGHT
)
thumbnail_data = img.execute_transforms()
thumbnail_key = key + THUMB_SUFFIX
memcache.set(
thumbnail_key,
thumbnail_data,
time=EXPIRATION_TIME
)
except: #Failed to resize Image or add to memcache
thumbnail_key = None
return (key, thumbnail_key)
def handle_upload(self):
results = []
for name, fieldStorage in self.request.POST.items():
if type(fieldStorage) is unicode:
continue
result = {}
result['name'] = urllib.unquote(fieldStorage.filename)
result['type'] = fieldStorage.type
result['size'] = self.get_file_size(fieldStorage.file)
if self.validate(result):
key, thumbnail_key = self.write_blob(
fieldStorage.value,
result
)
if key is not None:
result['url'] = self.request.host_url + '/' + key
result['deleteUrl'] = result['url']
result['deleteType'] = 'DELETE'
if thumbnail_key is not None:
result['thumbnailUrl'] = self.request.host_url +\
'/' + thumbnail_key
else:
result['error'] = 'Failed to store uploaded file.'
results.append(result)
return results
def head(self):
pass
def get(self):
self.redirect(WEBSITE)
def post(self):
if (self.request.get('_method') == 'DELETE'):
return self.delete()
result = {'files': self.handle_upload()}
s = self.json_stringify(result)
redirect = self.request.get('redirect')
if self.validate_redirect(redirect):
return self.redirect(str(
redirect.replace('%s', urllib.quote(s, ''), 1)
))
if 'application/json' in self.request.headers.get('Accept'):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
class FileHandler(CORSHandler):
def normalize(self, str):
return urllib.quote(urllib.unquote(str), '')
def get(self, content_type, data_hash, file_name):
content_type = self.normalize(content_type)
file_name = self.normalize(file_name)
key = content_type + '/' + data_hash + '/' + file_name
data = memcache.get(key)
if data is None:
return self.error(404)
# Prevent browsers from MIME-sniffing the content-type:
self.response.headers['X-Content-Type-Options'] = 'nosniff'
content_type = urllib.unquote(content_type)
if not IMAGE_TYPES.match(content_type):
# Force a download dialog for non-image types:
content_type = 'application/octet-stream'
elif file_name.endswith(THUMB_SUFFIX):
content_type = 'image/png'
self.response.headers['Content-Type'] = content_type
# Cache for the expiration time:
self.response.headers['Cache-Control'] = 'public,max-age=%d' \
% EXPIRATION_TIME
self.response.write(data)
def delete(self, content_type, data_hash, file_name):
content_type = self.normalize(content_type)
file_name = self.normalize(file_name)
key = content_type + '/' + data_hash + '/' + file_name
result = {key: memcache.delete(key)}
content_type = urllib.unquote(content_type)
if IMAGE_TYPES.match(content_type):
thumbnail_key = key + THUMB_SUFFIX
result[thumbnail_key] = memcache.delete(thumbnail_key)
if 'application/json' in self.request.headers.get('Accept'):
self.response.headers['Content-Type'] = 'application/json'
s = self.json_stringify(result)
self.response.write(s)
app = webapp2.WSGIApplication(
[
('/', UploadHandler),
('/(.+)/([^/]+)/([^/]+)', FileHandler)
],
debug=DEBUG
)
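# A hedged manual-test sketch, kept as a comment; the dev-server port and file
# name are assumptions. Any multipart field name works since handle_upload
# iterates over all POST items; the JSON response carries url/deleteUrl.
#
#   curl -H "Accept: application/json" \
#        -F "files[][email protected];type=image/png" http://localhost:8080/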
| mit |
dragondjf/cqssl | handlers/yunpian_sms.py | 1 | 2967 | #!/usr/local/bin/python
#-*- coding:utf-8 -*-
# Author: jacky
# Time: 14-2-22 11:48 PM
# Desc: Sample Python code for calling the SMS HTTP API
import httplib
import urllib
import logging
# Service address
host = "yunpian.com"
# Port number
port = 80
# API version
version = "v1"
# URI for querying account information
user_get_uri = "/" + version + "/user/get.json"
# URI of the smart template-matching SMS API
sms_send_uri = "/" + version + "/sms/send.json"
# URI of the template SMS API
sms_tpl_send_uri = "/" + version + "/sms/tpl_send.json"
def get_user_info(apikey):
"""
取账户信息
"""
conn = httplib.HTTPConnection(host, port=port)
conn.request('GET', user_get_uri + "?apikey=" + apikey)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
def send_sms(apikey, text, mobile):
"""
Send an SMS via the general (smart template-matching) API
"""
params = urllib.urlencode({'apikey': apikey, 'text': text, 'mobile':mobile})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = httplib.HTTPConnection(host, port=port, timeout=30)
conn.request("POST", sms_send_uri, params, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
def tpl_send_sms(apikey, tpl_id, tpl_value, mobile):
"""
Send an SMS via the template API
"""
params = urllib.urlencode({'apikey': apikey, 'tpl_id':tpl_id, 'tpl_value': tpl_value, 'mobile':mobile})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
conn = httplib.HTTPConnection(host, port=port, timeout=30)
conn.request("POST", sms_tpl_send_uri, params, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
def sendsms(result):
apikey = "25bd87c30a32e1bec18430f67acd199b "
mobile = "13986218913"
tpl_id = 1002491 # the corresponding template content is: Your verification code is #code# [#company#]
message1 = "#type1#=大&#count1#=%d" % (result['large'])
message2 = "#type2#=小&#count2#=%d" % (result['small'])
message3 = "#type3#=单&#count3#=%d" % (result['odd'])
message4 = "#type4#=双&#count4#=%d" % (result['even'])
message = "%s&%s&%s&%s" % (message1, message2, message3, message4)
ret = tpl_send_sms(apikey, tpl_id, message, mobile)
logging.info(ret)
if __name__ == '__main__':
apikey = "25bd87c30a32e1bec18430f67acd199b "
mobile = "13986218913"
text = "csqqc [Dan] 13"
# Query account information
# print(get_user_info(apikey))
# Send an SMS via the smart template-matching API
# print(send_sms(apikey, text, mobile))
# # Send an SMS via the template API
tpl_id = 1002491 # the corresponding template content is: Your verification code is #code# [#company#]
# tpl_value = '#type1#=单&#count1#=5&#type2#=双&#count2#=15&#type3#=大&#count3#=5&#type4#=小&#count4#=5'
# print(tpl_send_sms(apikey, tpl_id, tpl_value, mobile))
| apache-2.0 |
ryepdx/readable_superuser | __openerp__.py | 1 | 1568 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Readable Superuser',
'version': '1',
'category': 'Generic Modules/Others',
'description': "Make partner records belonging to the superuser globally readable.",
'author': 'RyePDX LLC',
'website': ' http://www.ryepdx.com',
'depends': ['mail', 'procurement', 'sale_stock', 'quickship'],
'data': ['permissions.yml'],
'test': ['test/setup.xml', 'test/permissions.yml'],
'installable': True,
'active': False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
owlabs/incubator-airflow | airflow/operators/branch_operator.py | 1 | 2027 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Branching operators"""
from typing import Union, Iterable, Dict
from airflow.models import BaseOperator, SkipMixin
class BaseBranchOperator(BaseOperator, SkipMixin):
"""
This is a base class for creating operators with branching functionality,
similarly to BranchPythonOperator.
Users should subclass this operator and implement the function
`choose_branch(self, context)`. This should run whatever business logic
is needed to determine the branch, and return either the task_id for
a single task (as a str) or a list of task_ids.
The operator will continue with the returned task_id(s), and all other
tasks directly downstream of this operator will be skipped.
"""
def choose_branch(self, context): # type: (Dict) -> Union[str, Iterable[str]]
"""
Subclasses should implement this, running whatever logic is
necessary to choose a branch and returning a task_id or list of
task_ids.
:param context: Context dictionary as passed to execute()
:type context: dict
"""
raise NotImplementedError
def execute(self, context):
self.skip_all_except(context['ti'], self.choose_branch(context))
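# A minimal subclass sketch, kept as a comment; the task ids and the weekday
# rule are illustrative assumptions, not fixed by this module.
#
#   class WeekdayBranchOperator(BaseBranchOperator):
#       def choose_branch(self, context):
#           # run 'weekday_task' Mon-Fri, otherwise 'weekend_task'
#           if context['execution_date'].weekday() < 5:
#               return 'weekday_task'
#           return 'weekend_task'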
| apache-2.0 |
detrout/telepathy-qt | tools/libtpcodegen.py | 28 | 6610 | """Library code for language-independent D-Bus-related code generation.
The master copy of this library is in the telepathy-glib repository -
please make any changes there.
"""
# Copyright (C) 2006-2008 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from string import ascii_letters, digits
NS_TP = "http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0"
_ASCII_ALNUM = ascii_letters + digits
def cmp_by_name(node1, node2):
return cmp(node1.getAttributeNode("name").nodeValue,
node2.getAttributeNode("name").nodeValue)
def escape_as_identifier(identifier):
"""Escape the given string to be a valid D-Bus object path or service
name component, using a reversible encoding to ensure uniqueness.
The reversible encoding is as follows:
* The empty string becomes '_'
* Otherwise, each non-alphanumeric character is replaced by '_' plus
two lower-case hex digits; the same replacement is carried out on
the first character, if it's a digit
"""
# '' -> '_'
if not identifier:
return '_'
# A bit of a fast path for strings which are already OK.
# We deliberately omit '_' because, for reversibility, that must also
# be escaped.
if (identifier.strip(_ASCII_ALNUM) == '' and
identifier[0] in ascii_letters):
return identifier
# The first character may not be a digit
if identifier[0] not in ascii_letters:
ret = ['_%02x' % ord(identifier[0])]
else:
ret = [identifier[0]]
# Subsequent characters may be digits or ASCII letters
for c in identifier[1:]:
if c in _ASCII_ALNUM:
ret.append(c)
else:
ret.append('_%02x' % ord(c))
return ''.join(ret)
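# Worked examples of the escaping rules above (hand-checked):
#   escape_as_identifier('') -> '_'
#   escape_as_identifier('badger') -> 'badger'
#   escape_as_identifier('0123') -> '_30123' (leading digit escaped)
#   escape_as_identifier('a.b') -> 'a_2eb' ('.' is 0x2e)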
def get_by_path(element, path):
branches = path.split('/')
branch = branches[0]
# Is the current branch an attribute, if so, return the attribute value
if branch[0] == '@':
return element.getAttribute(branch[1:])
# Find matching children for the branch
children = []
if branch == '..':
children.append(element.parentNode)
else:
for x in element.childNodes:
if x.localName == branch:
children.append(x)
ret = []
# If this is not the last path element, recursively gather results from
# children
if len(branches) > 1:
for x in children:
add = get_by_path(x, '/'.join(branches[1:]))
if isinstance(add, list):
ret += add
else:
return add
else:
ret = children
return ret
def get_docstring(element):
docstring = None
for x in element.childNodes:
if x.namespaceURI == NS_TP and x.localName == 'docstring':
docstring = x
if docstring is not None:
docstring = docstring.toxml().replace('\n', ' ').strip()
if docstring.startswith('<tp:docstring>'):
docstring = docstring[14:].lstrip()
if docstring.endswith('</tp:docstring>'):
docstring = docstring[:-15].rstrip()
if docstring in ('<tp:docstring/>', ''):
docstring = ''
return docstring
def get_deprecated(element):
text = []
for x in element.childNodes:
if hasattr(x, 'data'):
text.append(x.data.replace('\n', ' ').strip())
else:
# This caters for tp:dbus-ref elements, but little else.
if x.childNodes and hasattr(x.childNodes[0], 'data'):
text.append(x.childNodes[0].data.replace('\n', ' ').strip())
return ' '.join(text)
def get_descendant_text(element_or_elements):
if not element_or_elements:
return ''
if isinstance(element_or_elements, list):
return ''.join(map(get_descendant_text, element_or_elements))
parts = []
for x in element_or_elements.childNodes:
if x.nodeType == x.TEXT_NODE:
parts.append(x.nodeValue)
elif x.nodeType == x.ELEMENT_NODE:
parts.append(get_descendant_text(x))
else:
pass
return ''.join(parts)
class _SignatureIter:
"""Iterator over a D-Bus signature. Copied from dbus-python 0.71 so we
can run genginterface in a limited environment with only Python
(like Scratchbox).
"""
def __init__(self, string):
self.remaining = string
def next(self):
if self.remaining == '':
raise StopIteration
signature = self.remaining
block_depth = 0
block_type = None
end = len(signature)
for marker in range(0, end):
cur_sig = signature[marker]
if cur_sig == 'a':
pass
elif cur_sig == '{' or cur_sig == '(':
if block_type == None:
block_type = cur_sig
if block_type == cur_sig:
block_depth = block_depth + 1
elif cur_sig == '}':
if block_type == '{':
block_depth = block_depth - 1
if block_depth == 0:
end = marker
break
elif cur_sig == ')':
if block_type == '(':
block_depth = block_depth - 1
if block_depth == 0:
end = marker
break
else:
if block_depth == 0:
end = marker
break
end = end + 1
self.remaining = signature[end:]
return Signature(signature[0:end])
class Signature(str):
"""A string, iteration over which is by D-Bus single complete types
rather than characters.
"""
def __iter__(self):
return _SignatureIter(self)
def xml_escape(s):
s = s.replace('&', '&').replace("'", ''').replace('"', '"')
return s.replace('<', '<').replace('>', '>')
| lgpl-2.1 |
linas/blender_api_msgs | test/test_blender_api.py | 1 | 8165 | #!/usr/bin/env python
import unittest
import os
import sys
import time
import glob
import rospkg
import roslaunch
import rostopic
import rosbag
import rosnode
from roslaunch import core
from testing_tools.misc import (wait_for, startxvfb, stopxvfb, capture_screen,
run_shell_cmd, add_text_to_video, concatenate_videos,
rosbag_msg_generator, get_rosbag_file, check_if_ffmpeg_satisfied)
from blender_api_msgs.msg import *
from genpy import Duration
from roslaunch import nodeprocess
nodeprocess._TIMEOUT_SIGINT = 2
nodeprocess._TIMEOUT_SIGTERM = 1
TEST_DIR = rospkg.get_test_results_dir()
PKG = 'blender_api_msgs'
def parse_msg(msg):
return eval(msg.split(':')[1].strip())
class BlenderAPITest(unittest.TestCase):
def setUp(self):
blender_api_path = os.path.join(
rospkg.RosPack().get_path('blender_api_msgs'), '../blender_api')
config = roslaunch.config.ROSLaunchConfig()
config.add_node(
core.Node(
package='blender_api_msgs', node_type='blender',
args='-y %s/Sophia.blend -P %s/autostart.py' % (
blender_api_path, blender_api_path),
name='blender_api')
)
self.runner = roslaunch.launch.ROSLaunchRunner(
self.run_id, config, is_rostest=True)
self.runner.launch()
for node in config.nodes:
wait_for('%s/%s' % (node.namespace, node.name))
time.sleep(5) # Wait for blender rendering done
def tearDown(self):
self.runner.stop()
@classmethod
def setUpClass(self):
self.run_id = 'test_blender_api'
self.output_dir = '%s/%s/output_video' % (TEST_DIR, self.run_id)
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
self.display = os.environ.get('DISPLAY', ':0')
if self.display != ':0':
startxvfb(self.display, '1920x1080x24')
@classmethod
def tearDownClass(self):
if self.display != ':0':
stopxvfb(self.display)
if not os.path.isfile('%s/all.avi' % self.output_dir):
videos = glob.glob('%s/*.avi' % self.output_dir)
videos = [f for f in videos if not f.endswith('all.avi')]
if len(videos) > 1:
ofile = '%s/all.avi' % self.output_dir
concatenate_videos(videos, ofile, False)
@unittest.skipUnless(
check_if_ffmpeg_satisfied(), 'Skip because ffmpeg is not satisfied.')
def test_emotion_state(self):
available_emotions = parse_msg(run_shell_cmd(
'rostopic echo -n1 /blender_api/available_emotion_states', True))
available_emotions = available_emotions[:1]
pub, msg_class = rostopic.create_publisher(
'/blender_api/set_emotion_state',
'blender_api_msgs/EmotionState', True)
timeout = 2
videos = []
for emotion in available_emotions:
video = '%s/emotion-%s.avi' % (self.output_dir, emotion)
with capture_screen(video, timeout):
pub.publish(msg_class(emotion, 1, Duration(1, 0)))
add_text_to_video(video)
videos.append(video)
ofile = '%s/emotions.avi' % self.output_dir
concatenate_videos(videos, ofile, True)
pub.unregister()
@unittest.skipUnless(
check_if_ffmpeg_satisfied(), 'Skip because ffmpeg is not satisfied.')
def test_gesture(self):
available_gestures = parse_msg(run_shell_cmd(
'rostopic echo -n1 /blender_api/available_gestures', True))
available_gestures = available_gestures[:2]
pub, msg_class = rostopic.create_publisher(
'/blender_api/set_gesture',
'blender_api_msgs/SetGesture', True)
timeout = 2
videos = []
for gesture in available_gestures:
if gesture == 'all': continue
video = '%s/gesture-%s.avi' % (self.output_dir, gesture)
with capture_screen(video, timeout):
pub.publish(msg_class(gesture, 1, 1, 1))
add_text_to_video(video)
videos.append(video)
ofile = '%s/gestures.avi' % self.output_dir
concatenate_videos(videos, ofile, True)
pub.unregister()
@unittest.skipUnless(
check_if_ffmpeg_satisfied(), 'Skip because ffmpeg is not satisfied.')
def test_viseme(self):
available_visemes = parse_msg(run_shell_cmd(
'rostopic echo -n1 /blender_api/available_visemes', True))
available_visemes = available_visemes[:1]
pub, msg_class = rostopic.create_publisher(
'/blender_api/queue_viseme',
'blender_api_msgs/Viseme', True)
timeout = 2
videos = []
for viseme in available_visemes:
if 'old' in viseme: continue
video = '%s/viseme-%s.avi' % (self.output_dir, viseme)
with capture_screen(video, timeout):
pub.publish(msg_class(
viseme, Duration(0, 0), Duration(0, 5*1e8), 0.1, 0.8, 1))
add_text_to_video(video)
videos.append(video)
ofile = '%s/viseme.avi' % self.output_dir
concatenate_videos(videos, ofile, True)
pub.unregister()
@unittest.skipUnless(
check_if_ffmpeg_satisfied(), 'Skip because ffmpeg is not satisfied.')
def test_gaze_target(self):
pub, msg_class = rostopic.create_publisher(
'/blender_api/set_gaze_target',
'blender_api_msgs/Target', True)
timeout = 1
targets = {
'center': (1,0,0,1),
'right':(0,1,0,1),
'left':(0,-1,0,1),
'up':(1,0,0.5,1),
'down':(1,0,-0.5,1)}
videos = []
#for name in ['right', 'up', 'left', 'down', 'center']:
for name in ['right', 'center']:
video = '%s/gaze-%s.avi' % (self.output_dir, name)
with capture_screen(video, timeout):
pub.publish(msg_class(*targets[name]))
add_text_to_video(video)
videos.append(video)
ofile = '%s/gaze.avi' % self.output_dir
concatenate_videos(videos, ofile, True)
pub.unregister()
@unittest.skipUnless(
check_if_ffmpeg_satisfied(), 'Skip because ffmpeg is not satisfied.')
def test_face_target(self):
pub, msg_class = rostopic.create_publisher(
'/blender_api/set_face_target',
'blender_api_msgs/Target', True)
timeout = 2
targets = {
'center': (1,0,0,1),
'right':(0,1,0,1),
'left':(0,-1,0,1),
'up':(1,0,0.5,1),
'down':(1,0,-0.5,1)}
videos = []
#for name in ['right', 'up', 'left', 'down', 'center']:
for name in ['right', 'center']:
video = '%s/face-%s.avi' % (self.output_dir, name)
with capture_screen(video, timeout):
pub.publish(msg_class(*targets[name]))
add_text_to_video(video)
videos.append(video)
ofile = '%s/face.avi' % self.output_dir
concatenate_videos(videos, ofile, True)
pub.unregister()
def test_long_viseme(self):
filename = get_rosbag_file('long_viseme')
#job = play_rosbag(filename)
#job.join()
pub, msg_class = rostopic.create_publisher(
'/blender_api/queue_viseme', 'blender_api_msgs/Viseme', True)
bag = rosbag.Bag(filename)
duration = bag.get_end_time() - bag.get_start_time()
fps = bag.get_message_count() / float(duration)
wait = 1.0/fps/10 # 10 times faster than the speed of the msg recoreded
for topic, msg, _ in rosbag_msg_generator(filename):
pub.publish(msg)
time.sleep(wait)
# Test if blender is still alive
self.assertIn('/blender_api', rosnode.get_node_names())
self.assertTrue(
any(['blender_api' in name for name in self.runner.pm.get_active_names()])
)
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, 'blender_api', BlenderAPITest)
#unittest.main()
| bsd-3-clause |
nickabattista/IB2d | pyIB2d/IBM_Blackbox/Supp.py | 1 | 23320 | '''-------------------------------------------------------------------------
IB2d is an Immersed Boundary Code (IB) for solving fully coupled non-linear
fluid-structure interaction models. This version of the code is based off of
Peskin's Immersed Boundary Method Paper in Acta Numerica, 2002.
Author: Nicholas A. Battista
Email: [email protected]
Date Created: May 27th, 2015
Python 3.5 port by: Christopher Strickland
Institution: UNC-CH
This code is capable of creating Lagrangian Structures using:
1. Springs
2. Beams (*torsional springs)
3. Target Points
4. Muscle-Model (combined Force-Length-Velocity model, "HIll+(Length-Tension)")
One is able to update those Lagrangian Structure parameters, e.g.,
spring constants, resting lengths, etc
There are a number of built in Examples, mostly used for teaching purposes.
If you would like us to add a specific muscle model,
please let Nick ([email protected]) know.
For the Python port, I am going to throw a lot of supporting functions into
here for convenience. That way they get loaded all at once, and are called
by their name in an intuitive way. The functions (with their subfunctions
following) are here in this order:
-- please_Move_Lagrangian_Point_Positions
-- give_NonZero_Delta_Indices_XY
-- give_Eulerian_Lagrangian_Distance
-- give_Delta_Kernel
-- give_1D_NonZero_Delta_Indices
-- please_Move_Massive_Boundary
-- please_Update_Massive_Boundary_Velocity
-- D
-- DD
-- please_Update_Adv_Diff_Concentration
----------------------------------------------------------------------------'''
import numpy as np
from math import sqrt
from numba import jit
################################################################################
#
# FUNCTION: Moves Lagrangian Point Positions by doing the integral,
#
# " xLag_Next = xLag_Prev + dt* int( u(x,t) delta( x - xLag_n ) dX ) "
#
################################################################################
def please_Move_Lagrangian_Point_Positions(mu, u, v, xL_P, yL_P, xL_H, yL_H, x, y,\
dt, grid_Info,porous_Yes,poroelastic_Yes,poroelastic_info,F_Poro):
''' Moves Lagrangian point positions
u: 2D array
v: 2D array
xL_P:
yL_P:
xL_H:
yL_H:
x:
y:
dt:
grid_Info:
porous_Yes:
Returns:
xL_Next:
yL_Next:'''
# Grid Info. grid_Info is indexed by position (Nx, Ny, Lx, Ly, dx, dy, supp, Nb, ds)
Nx = grid_Info[0]
Ny = grid_Info[1]
Lx = grid_Info[2]
Ly = grid_Info[3]
dx = grid_Info[4]
dy = grid_Info[5]
supp = int(grid_Info[6])
Nb = grid_Info[7]
ds = grid_Info[8]
# Find indices where the delta-function kernels are non-zero for both x and y.
xInds,yInds = give_NonZero_Delta_Indices_XY(xL_H, yL_H, Nx, Ny, dx, dy, supp)
# ReSize the xL_H and yL_H matrices for use in the Dirac-delta function
# values to find distances between corresponding Eulerian data and them
xLH_aux = xL_H % Lx
yLH_aux = yL_H % Ly
# Stack copies of the row vector and then transpose
xL_H_ReSize = np.tile(xLH_aux,(supp**2,1)).T
yL_H_ReSize = np.tile(yLH_aux,(supp**2,1)).T
# Finds distance between specified Eulerian data and nearby Lagrangian data
# x is a 1D array. x[xInds] is a 2D array of values in x
# (the scalar and array xL_P cases reduce to the same computation)
distX = give_Eulerian_Lagrangian_Distance(x[xInds], xL_H_ReSize, Lx)
distY = give_Eulerian_Lagrangian_Distance(y[yInds], yL_H_ReSize, Ly)
# Obtain the Dirac-delta function values.
delta_X = give_Delta_Kernel(distX, dx)
delta_Y = give_Delta_Kernel(distY, dy)
# Perform Integral
move_X, move_Y = give_Me_Perturbed_Distance(u,v,dx,dy,delta_X,delta_Y,xInds,yInds)
# Update the Lagrangian Point Position.
xL_Next = xL_P + (dt) * move_X
yL_Next = yL_P + (dt) * move_Y
# Update the Lagrangian Point Positions with poroelasticity.
if poroelastic_Yes:
#
# poroelastic_info[:,0]: index of poroelastic point
# poroelastic_info[:,1]: Brinkman constant
#
xL_Next[poroelastic_info[0:,0].astype(int)] = xL_Next[poroelastic_info[0:,0].astype(int)] + ( 1/(mu*poroelastic_info[0:,1]) * F_Poro[0:,0] ) * dt
yL_Next[poroelastic_info[0:,0].astype(int)] = yL_Next[poroelastic_info[0:,0].astype(int)] + ( 1/(mu*poroelastic_info[0:,1]) * F_Poro[0:,1] ) * dt
# Shift so that all values are in [0,Lx or Ly).
if not porous_Yes:
xL_Next = xL_Next % Lx
yL_Next = yL_Next % Ly
return (xL_Next, yL_Next)
################################################################################
# FUNCTION: Computes the integral to move each Lagrangian Pt!
################################################################################
def give_Me_Perturbed_Distance(u,v,dx,dy,delta_X,delta_Y,xInds,yInds):
''' Computes the integral to move each Lagrangian Pt.
Args:
u: x-component of velocity (2D array)
v: y-component of velocity (2D array)
delta_X: values of Dirac-delta function in x-direction (2D array)
delta_Y: values of Dirac-delta function in y-direction (2D array)
xInds: x-Indices on fluid grid (2D array)
yInds: y-Indices on fluid grid (2D array)'''
# Compute integrand 'stencil' of velocity x delta for each Lagrangian Pt!
# Fancy indexing allows us to do this directly
mat_X = u[yInds,xInds]*delta_X*delta_Y
mat_Y = v[yInds,xInds]*delta_X*delta_Y
# Approximate Integral of Velocity x Delta for each Lagrangian Pt!
move_X = mat_X.sum(1) * (dx*dy)
move_Y = mat_Y.sum(1) * (dx*dy)
return (move_X, move_Y)
############################################################################################
#
# FUNCTION: finds the indices on the Eulerian grid where the 1D Dirac-delta
# kernel is possibly non-zero in BOTH (x,y) directions
#
############################################################################################
def give_NonZero_Delta_Indices_XY(xLag, yLag, Nx, Ny, dx, dy, supp):
''' Find indices where 1D Dirac-delta kernel is non-zero in both (x,y)
Args:
xLag: gives x-coordinate of Lagrangian position
yLag: gives y-coordinate of Lagrangian position
Nx: # of Eulerian grid pts. in x-dimension
Ny: # of Eulerian grid pts. in y-dimension
dx: spatial-step along x-dimension of Eulerian grid
dy: spatial-step along y-dimension of Eulerian grid
supp: size of support of the Dirac-delta kernel (should be even)
Returns:
xInds: x index
yInds: y index'''
#Give x-dimension Non-Zero Delta Indices
xIndsAux = give_1D_NonZero_Delta_Indices(xLag, Nx, dx, supp)
#Repeat x-Indices for Non-Zero y-Indices!
#Sets up x-INDEX matrix bc we consider BOTH dimensions
xInds = np.tile(xIndsAux,(1,supp)) #tiles matrix in horiz direction
#Give y-dimension Non-Zero Delta Indices
yIndsAux = give_1D_NonZero_Delta_Indices(yLag, Ny, dy, supp)
#Repeat y-Indices for Non-Zero x-Indices!
yInds = np.repeat(yIndsAux,supp,axis=1) #repeats each element horizontally
# supp number of times
#Sets up y-INDEX matrix bc we consider BOTH dimensions
#these are indices, so return ints
return (xInds.astype('int'),yInds.astype('int'))
################################################################################
#
# FUNCTION: finds the distance between Eulerian grid data, x, and Lagrangian
# grid data, y, at specified pts and makes sure the distances are
# correct for a periodic [0,L] domain.
#
################################################################################
def give_Eulerian_Lagrangian_Distance(x, y, L):
''' Find dist. between Eulerian grid data and Lagrangian grid data.
[0,L] has periodic boundary condition, so in actuality, the greatest
distance possible is L/2.
Args:
x,y: two matrices that you find the distance between
(x-typically Eulerian data, y-typically Lagrangian data)
L: length of domain, i.e., [0,L]
Returns:
distance: distance'''
distance = abs( x - y )
distance = np.minimum(distance,L-distance) #element-wise minima
return distance
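# --- Editor's note: illustrative sketch, not part of the original IB2d code ---
# Minimal example of the periodic distance above. On a domain of length L = 1,
# points at 0.9 and 0.1 are only 0.2 apart because the domain wraps around.
# The numbers are made up purely for illustration.
def _example_periodic_distance():
    import numpy as np
    d = give_Eulerian_Lagrangian_Distance(np.array([0.9]), np.array([0.1]), 1.0)
    return d   # approximately array([0.2]), not 0.8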
###########################################################################
#
# FUNCTION: computes a discrete approx. to a 1D Dirac-delta function over a
# specified matrix, x, and spatial step-size, dx. It will have support in
# [x-2dx, x+2dx]
#
###########################################################################
@jit(nopython=True)
def give_Delta_Kernel(x,dx):
''' Computes discrete approx. to 1D delta func over x in [x-2dx,x+2dx].
Args:
x: Values at which the delta function will be evaluated
dx: Spatial step-size of grid
Returns:
delta: delta function with support [x-2dx,x+2dx]'''
# Computes Dirac-delta Approximation.
RMAT = np.abs(x)/dx
#Alias the data for cleaner writing of the following step
# RMAT is altered, but it will not be reused.
delta = RMAT
#Loops over to find delta approximation
row,col = x.shape
for ii in range(row):
for jj in range(col):
r = RMAT[ii,jj]
# Approximate Discrete Delta Function
#if r <= 2:
# delta[ii,jj] = 0.25*( 1 + cos(pi*r/2) )
#else:
# delta[ii,jj] = 0
# PESKIN 4-Point Discrete Delta Function
if r<1:
delta[ii,jj] = ( (3 - 2*r + sqrt(1 + 4*r - 4*r*r) ) / (8*dx) )
elif (r<2) and (r>=1):
delta[ii,jj] = ( (5 - 2*r - sqrt(-7 + 12*r - 4*r*r) ) / (8*dx) )
else:
delta[ii,jj] = 0
return delta
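# --- Editor's note: illustrative sketch, not part of the original IB2d code ---
# Sanity check on the Peskin 4-point kernel above: evaluated at the four grid
# points surrounding a Lagrangian point, the kernel values times dx sum to 1
# (the discrete analogue of a delta function integrating to one). The grid
# spacing and point location are made-up numbers.
def _example_delta_kernel_partition_of_unity():
    import numpy as np
    dx = 0.5
    grid = np.array([[-0.5, 0.0, 0.5, 1.0]])   # four nearest grid points
    X = 0.15                                    # Lagrangian point location
    delta = give_Delta_Kernel(grid - X, dx)
    return delta.sum() * dx                     # 1.0 up to roundoff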
###########################################################################
#
# FUNCTION finds the indices on the Eulerian grid where the 1D Dirac-delta
# kernel is possibly non-zero is x-dimension.
#
###########################################################################
def give_1D_NonZero_Delta_Indices(lagPts_j, N, dx, supp):
''' Find the indices on Eulerian grid where 1D delta is non-zero in x dim.
Args:
lagPts_j: row of lagrangian pts for specific coordinate, j= x or y.
N: # spatial resolution of Eulerian grid in each dimension
dx: Spatial step-size on Eulerian (fluid) grid
supp: Size of support of the Dirac-delta kernel (should be even)
Returns:
indices'''
# Finds the index of the lower left Eulerian pt. relative to each Lagrangian pt.
ind_Aux = np.floor(lagPts_j/dx + 1)
# Get all the different x indices that must be considered.
# ind_Aux is 1D. Create 2D array with supp # of columns of ind_Aux
indices = np.tile(ind_Aux,(supp,1)).T #stack row vectors then transpose
#
indices += -supp/2+1+np.arange(supp) #arange returns row array, which
# broadcasts down each column.
# Translate indices between {0,1,..,N-1}
indices = (indices-1) % N
return indices
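# --- Editor's note: illustrative sketch, not part of the original IB2d code ---
# Concrete example of the index computation above, with made-up numbers: a
# Lagrangian point at 0.26 on a periodic grid with dx = 0.1, N = 8 points and
# a kernel support of 4 cells touches grid indices 1, 2, 3 and 4.
def _example_1d_nonzero_indices():
    import numpy as np
    inds = give_1D_NonZero_Delta_Indices(np.array([0.26]), 8, 0.1, 4)
    return inds   # array([[1., 2., 3., 4.]])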
################################################################################
#
# FUNCTION: update 'massive' immersed boundary position
#
################################################################################
def please_Move_Massive_Boundary(dt_step,mass_info,mVelocity):
''' Update 'massive' immersed boundary position
Args:
dt_step: desired time-step for this position
mass_info: col 1: lag index for mass pt
col 2: massive x-Lag Value
col 3: massive y-Lag Value
col 4: 'mass-spring' stiffness parameter
col 5: MASS parameter value
mVelocity col 1: x-directed Lagrangian velocity
col 2: y-directed Lagrangian velocity
Returns:
mass_info:
massLagsOld:'''
massLagsOld = np.array(mass_info[:,(1, 2)])
# update x-Positions
mass_info[:,1] = mass_info[:,1] + dt_step*mVelocity[:,0]
# update y-Positions
mass_info[:,2] = mass_info[:,2] + dt_step*mVelocity[:,1]
return (mass_info, massLagsOld)
############################################################################################
#
# FUNCTION: update 'massive' immersed boundary velocity
#
############################################################################################
def please_Update_Massive_Boundary_Velocity(dt_step,mass_info,mVelocity,\
F_Mass_Bnd,gravity_Info):
''' Update 'massive' immersed boundary velocity
Args:
dt_step: desired time-step for this position
mass_info: col 1: lag index for mass pt
col 2: massive x-Lag Value
col 3: massive y-Lag Value
col 4: 'mass-spring' stiffness parameter
col 5: MASS parameter value
mVelocity col 1: x-directed Lagrangian velocity
col 2: y-directed Lagrangian velocity
F_Mass_Bnd col 1: x-directed Lagrangian force
col 2: y-directed Lagrangian force
gravity_Info col 1: flag if considering gravity (0 = NO, 1 = YES)
col 2: x-component of gravity vector (NORMALIZED PREVIOUSLY)
col 3: y-component of gravity vector (NORMALIZED PREVIOUSLY)
Returns:
mVelocity_h:'''
ids = mass_info[:,0].astype('int')
mVelocity_h = np.empty(mVelocity.shape)
if gravity_Info[0] == 1:
g = 9.80665 #m/s^2
# update x-Velocity
mVelocity_h[:,0] = mVelocity[:,0] - dt_step * \
( F_Mass_Bnd[ids,0]/mass_info[:,4] - g*gravity_Info[1] )
# update y-Velocity
mVelocity_h[:,1] = mVelocity[:,1] - dt_step * \
( F_Mass_Bnd[ids,1]/mass_info[:,4] - g*gravity_Info[2] )
else:
# update x-Velocity
mVelocity_h[:,0] = mVelocity[:,0] - dt_step*F_Mass_Bnd[ids,0]/mass_info[:,4]
# update y-Velocity
mVelocity_h[:,1] = mVelocity[:,1] - dt_step*F_Mass_Bnd[ids,1]/mass_info[:,4]
return mVelocity_h
########################################################################
#
# FUNCTION: Finds CENTERED finite difference approximation to 1ST
# Derivative in specified direction by input, dz, and 'string'.
# Note: It automatically accounts for periodicity of the domain.
#
########################################################################
def D(u,dz,string):
''' Finds centered 1st derivative in specified direction
Args:
u: velocity
dz: spatial step in "z"-direction
string: specifies which 1ST derivative to take (to enforce periodicity)
Returns:
u_z:'''
u_z = np.zeros(u.shape)
if string=='x':
length = u.shape[1] # number of pts along X direction
#For periodicity on ends
u_z[:,0] = ( u[:,1] - u[:,length-1] ) / (2*dz)
u_z[:,-1]= ( u[:,0] - u[:,length-2] ) / (2*dz)
#Standard Centered Difference
u_z[:,1:length-1] = ( u[:,2:length] - u[:,:length-2] ) / (2*dz)
elif string=='y':
length = u.shape[0] # number of pts along Y direction
#For periodicity on ends
u_z[0,:] = ( u[1,:] - u[length-1,:] ) / (2*dz)
u_z[length-1,:] = ( u[0,:] - u[length-2,:] ) / (2*dz)
#Standard Centered Difference
u_z[1:length-1,:] = ( u[2:length,:] - u[:length-2,:] ) / (2*dz)
else:
print('\n\n\n ERROR IN FUNCTION D FOR COMPUTING 1ST DERIVATIVE\n')
print('Need to specify which desired derivative, x or y.\n\n\n')
return u_z
########################################################################
#
# FUNCTION: Finds CENTERED finite difference approximation to 2ND
# DERIVATIVE in z direction, specified by input and 'string'
# Note: It automatically accounts for periodicity of the domain.
#
########################################################################
def DD(u,dz,string):
''' Finds centered 2nd derivative in z direction, specified by input & string
Args:
u: velocity
dz: spatial step in "z"-direction
string: specifies which 2ND derivative to take (to enforce periodicity)
Returns:
u_zz:'''
u_zz = np.zeros(u.shape)
if string=='x':
length = u.shape[1] # number of pts along X direction
#For periodicity on ends
u_zz[:,0] = ( u[:,1] - 2*u[:,0] + u[:,length-1] ) / (dz**2)
u_zz[:,-1] = ( u[:,0] - 2*u[:,length-1] + u[:,length-2] ) / (dz**2)
#Standard Centered Difference (2nd derivative)
u_zz[:,1:length-1] = (u[:,2:length]-2*u[:,1:length-1]+u[:,:length-2])\
/ (dz**2)
elif string=='y':
length = u.shape[0] # number of pts along Y direction
#For periodicity on ends
u_zz[0,:] = ( u[1,:] - 2*u[0,:] + u[length-1,:] ) / (dz**2)
u_zz[-1,:]= ( u[0,:] - 2*u[length-1,:] + u[length-2,:] ) / (dz**2)
#Standard Centered Difference (2nd derivative)
u_zz[1:length-1,:] = (u[2:length,:]-2*u[1:length-1,:]+u[:length-2,:])\
/ (dz**2)
else:
print('\n\n\n ERROR IN FUNCTION DD FOR COMPUTING 2ND DERIVATIVE\n')
print('Need to specify which desired derivative, x or y.\n\n\n')
return u_zz
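# --- Editor's note: illustrative sketch, not part of the original IB2d code ---
# Quick accuracy check for the periodic centered differences above: for
# u(x) = sin(x) sampled over one period, D should approximate cos(x) and DD
# should approximate -sin(x), both to second order in the grid spacing. The
# grid size below is made up.
def _example_centered_differences():
    import numpy as np
    N = 128
    dz = 2*np.pi/N
    x = np.arange(N)*dz
    u = np.tile(np.sin(x), (4, 1))     # 4 identical rows; differentiate along x
    err_first = np.max(np.abs(D(u, dz, 'x') - np.cos(x)))    # O(dz**2)
    err_second = np.max(np.abs(DD(u, dz, 'x') + np.sin(x)))  # O(dz**2)
    return err_first, err_second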
###########################################################################
#
# def: Setting up advection-diffusion solver
#
###########################################################################
def please_Update_Adv_Diff_Concentration(C,dt,dx,dy,uX,uY,k):
# C: concentration
# dt: time-step
# dx,dy: spatial steps in x and y, respectively
# uX: x-Component of Velocity
# uY: y-Component of Velocity
# k: diffusion coefficient
# Performs Upwind Advection WITHOUT Time-Splitting
#C = perform_Time_noSplit_Upwind(C,dt,dx,dy,uX,uY,k)
# Performs Upwind Advection w/ Time-Splitting
C = perform_Time_Split_Upwind(C,dt,dx,dy,uX,uY,k)
#laplacian_C=1 # DUMMY VARIABLE (laplacian not used anywhere else in code.)
return C
###########################################################################
#
# def: Advection-Diffusion Split Upwind Method
#
###########################################################################
def perform_Time_noSplit_Upwind(C,dt,dx,dy,uX,uY,k):
# Compute Necessary Derivatives (Note: these calculations could be parallelized)
Cx = give_Necessary_Derivative(C,dx,uX,'x')
Cy = give_Necessary_Derivative(C,dy,uY,'y')
Cxx = DD(C,dx,'x')
Cyy = DD(C,dy,'y')
# Forms Laplacian
laplacian_C = Cxx+Cyy
# UPWIND
C = C + dt * ( k*(laplacian_C) - uX*Cx - uY*Cy )
return C
###########################################################################
#
# def: Advection-Diffusion Split Upwind Method
#
###########################################################################
def perform_Time_Split_Upwind(C,dt,dx,dy,uX,uY,k):
# Compute Necessary Derivatives for x-Advection
Cx = give_Necessary_Derivative(C,dx,uX,'x')
Cxx = DD(C,dx,'x')
# Time-step #1 (give auxiliary)
C = C + dt * ( k*(Cxx) - uX*Cx )
# Compute Necessary Derivatives for y-Advection
Cy = give_Necessary_Derivative(C,dy,uY,'y')
Cyy = DD(C,dy,'y')
# Time-step #2 (give next iteration)
C = C + dt * ( k*(Cyy) - uY*Cy )
return C
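# --- Editor's note: illustrative sketch, not part of the original IB2d code ---
# How the split upwind update above might be driven: advect and diffuse a
# Gaussian concentration blob in a uniform rightward flow. The grid size,
# velocity, diffusion coefficient and time-step are made-up numbers chosen to
# respect the advective CFL and diffusive stability limits.
def _example_advection_diffusion():
    import numpy as np
    N = 64
    dx = dy = 1.0/N
    coords = np.arange(N)*dx
    X, Y = np.meshgrid(coords, coords)
    C = np.exp(-((X-0.5)**2 + (Y-0.5)**2)/0.01)   # initial concentration
    uX = 0.1*np.ones_like(C)                      # uniform flow in x
    uY = np.zeros_like(C)
    dt = 0.25*dx/0.1                              # CFL number of 0.25
    for _ in range(10):
        C = please_Update_Adv_Diff_Concentration(C, dt, dx, dy, uX, uY, 1e-4)
    return C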
###########################################################################
#
# def: Computes derivative based on sign of Velocity, u, using UPWIND
# approach
#
###########################################################################
def give_Necessary_Derivative(C,dz,uZ,string):
C_z = np.zeros(C.shape)
signs = np.sign(uZ)
[lenY,lenX] = uZ.shape
if string=='x':
#For periodicity on ends w/ UPWIND
for i in range(0,lenY):
#left side of grid
if signs[i,0] <= 0:
C_z[i,0] = ( C[i,1] - C[i,0] ) / (dz)
else:
C_z[i,0] = ( C[i,0] - C[i,lenX-1] ) / (dz)
#right side of grid
if signs[i,lenX-1] <= 0:
C_z[i,lenX-1] = ( C[i,0] - C[i,lenX-1] ) / (dz)
else:
C_z[i,lenX-1] = ( C[i,lenX-1] - C[i,lenX-2] ) / (dz)
#Standard Upwind
for i in range(0,lenY):
for j in range(1,lenX-1):   # interior pts (boundaries handled above)
if signs[i,j] <= 0:
C_z[i,j] = ( C[i,j+1] - C[i,j] ) / (dz)
else:
C_z[i,j] = ( C[i,j] - C[i,j-1] ) / (dz)
# Ends x-Direction calculation #
elif string=='y':
#For periodicity on ends w/ UPWIND
for i in range(0,lenX):
#bottom of grid
if signs[0,i] <= 0:
C_z[0,i] = ( C[1,i] - C[0,i] ) / (dz)
else:
C_z[0,i] = ( C[0,i] - C[lenY-1,i] ) / (dz)
#top of grid
if signs[lenY-1,i] <= 0:
C_z[lenY-1,i] = ( C[0,i] - C[lenY-1,i] ) / (dz)
else:
C_z[lenY-1,i] = ( C[lenY-1,i] - C[lenY-2,i] ) / (dz)
#Standard Upwind
for i in range(1,lenY-1):   # interior pts (boundaries handled above)
for j in range(0,lenX):
if signs[i,j] <= 0:
C_z[i,j] = ( C[i+1,j] - C[i,j] ) / (dz)
else:
C_z[i,j] = ( C[i,j] - C[i-1,j] ) / (dz)
# Ends y-Direction calculation #
else:
print('\n\n\n ERROR IN give_Necessary_Derivative FOR COMPUTING UPWIND DERIVATIVE\n')
print('Need to specify which desired derivative, x or y.\n\n\n')
return C_z
###########################################################################
#
# FUNCTION: Setting up advection-diffusion solver
#
###########################################################################
#def please_Update_Adv_Diff_Concentration(C,dt,dx,dy,uX,uY,k):
'''Setting up advection-diffusion solver
Note: This function alters C internally!
Args:
C: concentration
dt: time-step
dx,dy: spatial steps in x and y, respectively
uX: x-Component of Velocity
uY: y-Component of Velocity
k: diffusion coefficient
Returns:
C:'''
# # Compute Necessary Derivatives for x-Advection
# Cx = D(C,dx,'x')
# Cxx = DD(C,dx,'x')
# # Time-step #1 (give auxiliary)
# C = C + dt * ( k*(Cxx) - uX*Cx )
# # Compute Necessary Derivatives for y-Advection
# Cy = D(C,dy,'y')
# Cyy = DD(C,dy,'y')
# # Time-step #2 (give next iterative for C)
# C = C + dt * ( k*(Cyy) - uY*Cy )
# return C
| gpl-3.0 |
benob/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/corpus/reader/chunked.py | 9 | 8055 | # Natural Language Toolkit: Chunked Corpus Reader
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
A reader for corpora that contain chunked (and optionally tagged)
documents.
"""
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk.tree import Tree
from nltk.tokenize import *
from nltk import chunk
import os.path
class ChunkedCorpusReader(CorpusReader):
"""
Reader for chunked (and optionally tagged) corpora. Paragraphs
are split using a block reader. They are then tokenized into
sentences using a sentence tokenizer. Finally, these sentences
are parsed into chunk trees using a string-to-chunktree conversion
function. Each of these steps can be performed using a default
function or a custom function. By default, paragraphs are split
on blank lines; sentences are listed one per line; and sentences
are parsed into chunk trees using L{chunk.tagstr2tree}.
"""
def __init__(self, root, files, extension='',
str2chunktree=chunk.tagstr2tree,
sent_tokenizer=RegexpTokenizer('\n', gaps=True),
para_block_reader=read_blankline_block):
"""
@param root: The root directory for this corpus.
@param files: A list or regexp specifying the files in this corpus.
"""
CorpusReader.__init__(self, root, files)
self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader)
"""Arguments for corpus views generated by this corpus: a tuple
(str2chunktree, sent_tokenizer, para_block_tokenizer)"""
def raw(self, files=None):
"""
@return: the given file or files as a single string.
@rtype: C{str}
"""
return concat([open(filename).read()
for filename in self.abspaths(files)])
def words(self, files=None):
"""
@return: the given file or files as a list of words
and punctuation symbols.
@rtype: C{list} of C{str}
"""
return concat([ChunkedCorpusView(f, 0, 0, 0, 0, *self._cv_args)
for f in self.abspaths(files)])
def sents(self, files=None):
"""
@return: the given file or files as a list of
sentences or utterances, each encoded as a list of word
strings.
@rtype: C{list} of (C{list} of C{str})
"""
return concat([ChunkedCorpusView(f, 0, 1, 0, 0, *self._cv_args)
for f in self.abspaths(files)])
def paras(self, files=None):
"""
@return: the given file or files as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of word strings.
@rtype: C{list} of (C{list} of (C{list} of C{str}))
"""
return concat([ChunkedCorpusView(f, 0, 1, 1, 0, *self._cv_args)
for f in self.abspaths(files)])
def tagged_words(self, files=None):
"""
@return: the given file or files as a list of tagged
words and punctuation symbols, encoded as tuples
C{(word,tag)}.
@rtype: C{list} of C{(str,str)}
"""
return concat([ChunkedCorpusView(f, 1, 0, 0, 0, *self._cv_args)
for f in self.abspaths(files)])
def tagged_sents(self, files=None):
"""
@return: the given file or files as a list of
sentences, each encoded as a list of C{(word,tag)} tuples.
@rtype: C{list} of (C{list} of C{(str,str)})
"""
return concat([ChunkedCorpusView(f, 1, 1, 0, 0, *self._cv_args)
for f in self.abspaths(files)])
def tagged_paras(self, files=None):
"""
@return: the given file or files as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of C{(word,tag)} tuples.
@rtype: C{list} of (C{list} of (C{list} of C{(str,str)}))
"""
return concat([ChunkedCorpusView(f, 1, 1, 1, 0, *self._cv_args)
for f in self.abspaths(files)])
def chunked_words(self, files=None):
"""
@return: the given file or files as a list of tagged
words and chunks. Words are encoded as C{(word, tag)}
tuples (if the corpus has tags) or word strings (if the
corpus has no tags). Chunks are encoded as depth-one
trees over C{(word,tag)} tuples or word strings.
@rtype: C{list} of (C{(str,str)} and L{Tree})
"""
return concat([ChunkedCorpusView(f, 1, 0, 0, 1, *self._cv_args)
for f in self.abspaths(files)])
def chunked_sents(self, files=None):
"""
@return: the given file or files as a list of
sentences, each encoded as a shallow C{Tree}. The leaves
of these trees are encoded as C{(word, tag)} tuples (if
the corpus has tags) or word strings (if the corpus has no
tags).
@rtype: C{list} of L{Tree}
"""
return concat([ChunkedCorpusView(f, 1, 1, 0, 1, *self._cv_args)
for f in self.abspaths(files)])
def chunked_paras(self, files=None):
"""
@return: the given file or files as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as a shallow C{Tree}. The leaves of these
trees are encoded as C{(word, tag)} tuples (if the corpus
has tags) or word strings (if the corpus has no tags).
@rtype: C{list} of (C{list} of L{Tree})
"""
return concat([ChunkedCorpusView(f, 1, 1, 1, 1, *self._cv_args)
for f in self.abspaths(files)])
def _read_block(self, stream):
return [chunk.tagstr2tree(t) for t in
read_blankline_block(stream)]
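# --- Editor's note: illustrative sketch, not part of the original NLTK source ---
# Typical use of the reader above, assuming a directory of files whose
# sentences are in the tagged/chunked bracket format understood by
# chunk.tagstr2tree; the corpus path and filename are hypothetical:
#
# reader = ChunkedCorpusReader('/corpora/my_chunked', ['train.txt'])
# for sent in reader.chunked_sents():
#     print sent     # each sentence is a shallow Tree of chunks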
class ChunkedCorpusView(StreamBackedCorpusView):
def __init__(self, filename, tagged, group_by_sent, group_by_para,
chunked, str2chunktree, sent_tokenizer, para_block_reader):
StreamBackedCorpusView.__init__(self, filename)
self._tagged = tagged
self._group_by_sent = group_by_sent
self._group_by_para = group_by_para
self._chunked = chunked
self._str2chunktree = str2chunktree
self._sent_tokenizer = sent_tokenizer
self._para_block_reader = para_block_reader
def read_block(self, stream):
block = []
for para_str in self._para_block_reader(stream):
para = []
for sent_str in self._sent_tokenizer.tokenize(para_str):
sent = self._str2chunktree(sent_str)
# If requested, throw away the tags.
if not self._tagged:
sent = self._untag(sent)
# If requested, throw away the chunks.
if not self._chunked:
sent = sent.leaves()
# Add the sentence to `para`.
if self._group_by_sent:
para.append(sent)
else:
para.extend(sent)
# Add the paragraph to `block`.
if self._group_by_para:
block.append(para)
else:
block.extend(para)
# Return the block
return block
def _untag(self, tree):
for i, child in enumerate(tree):
if isinstance(child, Tree):
self._untag(child)
elif isinstance(child, tuple):
tree[i] = child[0]
else:
raise ValueError('expected child to be Tree or tuple')
return tree
| gpl-3.0 |
limingzhou/aliyun-cli | aliyuncli/colorama/ansi.py | 442 | 2304 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + "A"
def DOWN(self, n=1):
return CSI + str(n) + "B"
def FORWARD(self, n=1):
return CSI + str(n) + "C"
def BACK(self, n=1):
return CSI + str(n) + "D"
def POS(self, x=1, y=1):
return CSI + str(y) + ";" + str(x) + "H"
def set_title(title):
return OSC + "2;" + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + "J"
def clear_line(mode=2):
return CSI + str(mode) + "K"
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
Cursor = AnsiCursor()
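# --- Editor's note: illustrative sketch, not part of the original colorama source ---
# What the objects above produce: attribute access returns ready-made escape
# sequences that can simply be concatenated into strings, e.g.
#
# Fore.RED + 'error' + Style.RESET_ALL == '\033[31merror\033[0m'
# Cursor.UP(2) == '\033[2A'
# set_title('build log') == '\033]2;build log\007'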
| apache-2.0 |
openstack/manila | manila/tests/share/drivers/hpe/test_hpe_3par_constants.py | 1 | 7527 | # Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
CIFS = 'CIFS'
SMB_LOWER = 'smb'
NFS = 'NFS'
NFS_LOWER = 'nfs'
IP = 'ip'
USER = 'user'
USERNAME = 'USERNAME_0'
ADD_USERNAME = '+USERNAME_0:fullcontrol'
DROP_USERNAME = '-USERNAME_0:fullcontrol'
PASSWORD = 'PASSWORD_0'
READ_WRITE = 'rw'
READ_ONLY = 'ro'
SAN_LOGIN = 'testlogin4san'
SAN_PASSWORD = 'testpassword4san'
API_URL = 'https://1.2.3.4:8080/api/v1'
TIMEOUT = 60
PORT = 22
SHARE_TYPE_ID = 123456789
CIDR_PREFIX = '24'
# Constants to use with Mock and expect in results
EXPECTED_IP_10203040 = '10.20.30.40'
EXPECTED_IP_10203041 = '10.20.30.41'
EXPECTED_IP_1234 = '1.2.3.4'
EXPECTED_MY_IP = '9.8.7.6'
EXPECTED_IP_127 = '127.0.0.1'
EXPECTED_IP_127_2 = '127.0.0.2'
EXPECTED_ACCESS_LEVEL = 'foo_access'
EXPECTED_SUBNET = '255.255.255.0' # based on CIDR_PREFIX above
EXPECTED_VLAN_TYPE = 'vlan'
EXPECTED_VXLAN_TYPE = 'vxlan'
EXPECTED_VLAN_TAG = '101'
EXPECTED_SERVER_ID = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e'
EXPECTED_PROJECT_ID = 'osf-nfs-project-id'
SHARE_ID = 'share-id'
EXPECTED_SHARE_ID = 'osf-share-id'
EXPECTED_SHARE_ID_RO = 'osf-ro-share-id'
EXPECTED_SHARE_NAME = 'share-name'
EXPECTED_NET_NAME = 'testnet'
EXPECTED_FPG = 'pool'
EXPECTED_HOST = 'hostname@backend#' + EXPECTED_FPG
UNEXPECTED_FPG = 'not_a_pool'
UNEXPECTED_HOST = 'hostname@backend#' + UNEXPECTED_FPG
HOST_WITHOUT_POOL_1 = 'hostname@backend'
HOST_WITHOUT_POOL_2 = 'hostname@backend#'
EXPECTED_SHARE_PATH = '/anyfpg/anyvfs/anyfstore'
EXPECTED_SIZE_1 = 1
EXPECTED_SIZE_2 = 2
EXPECTED_SNAP_NAME = 'osf-snap-name'
EXPECTED_SNAP_ID = 'osf-snap-id'
EXPECTED_STATS = {'test': 'stats'}
EXPECTED_FPG_CONF = [{EXPECTED_FPG: [EXPECTED_IP_10203040]}]
EXPECTED_FSTORE = EXPECTED_PROJECT_ID
EXPECTED_VFS = 'test_vfs'
EXPECTED_GET_VFS = {'vfsname': EXPECTED_VFS,
'vfsip': {'address': [EXPECTED_IP_10203040]}}
EXPECTED_GET_VFS_MULTIPLES = {
'vfsname': EXPECTED_VFS,
'vfsip': {'address': [EXPECTED_IP_10203041, EXPECTED_IP_10203040]}}
EXPECTED_CLIENT_GET_VFS_MEMBERS_MULTI = {
'fspname': EXPECTED_VFS,
'vfsip': [
{'networkName': EXPECTED_NET_NAME,
'fspool': EXPECTED_VFS,
'address': EXPECTED_IP_10203040,
'prefixLen': EXPECTED_SUBNET,
'vfs': EXPECTED_VFS,
'vlanTag': EXPECTED_VLAN_TAG,
},
{'networkName': EXPECTED_NET_NAME,
'fspool': EXPECTED_VFS,
'address': EXPECTED_IP_10203041,
'prefixLen': EXPECTED_SUBNET,
'vfs': EXPECTED_VFS,
'vlanTag': EXPECTED_VLAN_TAG,
},
],
'vfsname': EXPECTED_VFS,
}
EXPECTED_MEDIATOR_GET_VFS_RET_VAL_MULTI = {
'fspname': EXPECTED_VFS,
'vfsip': {
'networkName': EXPECTED_NET_NAME,
'fspool': EXPECTED_VFS,
'address': [
EXPECTED_IP_10203040,
EXPECTED_IP_10203041,
],
'prefixLen': EXPECTED_SUBNET,
'vfs': EXPECTED_VFS,
'vlanTag': EXPECTED_VLAN_TAG
},
'vfsname': EXPECTED_VFS,
}
EXPECTED_CLIENT_GET_VFS_MEMBERS = {
'fspname': EXPECTED_VFS,
'vfsip': {
'networkName': EXPECTED_NET_NAME,
'fspool': EXPECTED_VFS,
'address': EXPECTED_IP_10203040,
'prefixLen': EXPECTED_SUBNET,
'vfs': EXPECTED_VFS,
'vlanTag': EXPECTED_VLAN_TAG,
},
'vfsname': EXPECTED_VFS,
}
EXPECTED_MEDIATOR_GET_VFS_RET_VAL = {
'fspname': EXPECTED_VFS,
'vfsip': {
'networkName': EXPECTED_NET_NAME,
'fspool': EXPECTED_VFS,
'address': [EXPECTED_IP_10203040],
'prefixLen': EXPECTED_SUBNET,
'vfs': EXPECTED_VFS,
'vlanTag': EXPECTED_VLAN_TAG,
},
'vfsname': EXPECTED_VFS,
}
EXPECTED_CLIENT_GET_VFS_RETURN_VALUE = {
'total': 1,
'members': [EXPECTED_CLIENT_GET_VFS_MEMBERS],
}
EXPECTED_CLIENT_GET_VFS_RETURN_VALUE_MULTI = {
'total': 1,
'members': [EXPECTED_CLIENT_GET_VFS_MEMBERS_MULTI],
}
EXPECTED_FPG_MAP = {EXPECTED_FPG: {EXPECTED_VFS: [EXPECTED_IP_10203040]}}
EXPECTED_FPG_MAP_MULTI_VFS = {EXPECTED_FPG: {
EXPECTED_VFS: [EXPECTED_IP_10203041, EXPECTED_IP_10203040]}}
EXPECTED_SHARE_IP = '10.50.3.8'
EXPECTED_HPE_DEBUG = True
EXPECTED_COMMENT = "OpenStack Manila - foo-comment"
EXPECTED_EXTRA_SPECS = {}
EXPECTED_LOCATION = ':'.join((EXPECTED_IP_1234, EXPECTED_SHARE_PATH))
EXPECTED_SUPER_SHARE = 'OPENSTACK_SUPER_SHARE'
EXPECTED_SUPER_SHARE_COMMENT = ('OpenStack super share used to delete nested '
'shares.')
EXPECTED_CIFS_DOMAIN = 'LOCAL_CLUSTER'
EXPECTED_MOUNT_PATH = '/mnt/'
SHARE_SERVER = {
'backend_details': {
'ip': EXPECTED_IP_10203040,
'fpg': EXPECTED_FPG,
'vfs': EXPECTED_VFS,
},
}
# Access rules. Allow for overwrites.
ACCESS_RULE_NFS = {
'access_type': IP,
'access_to': EXPECTED_IP_1234,
'access_level': READ_WRITE,
}
ACCESS_RULE_CIFS = {
'access_type': USER,
'access_to': USERNAME,
'access_level': READ_WRITE,
}
ADD_RULE_BAD_TYPE = {
'access_type': 'unsupported_other_type',
'access_to': USERNAME,
'access_level': READ_WRITE,
}
ADD_RULE_IP = {
'access_type': IP,
'access_to': EXPECTED_IP_1234,
'access_level': READ_WRITE,
}
ADD_RULE_IP_RO = {
'access_type': IP,
'access_to': EXPECTED_IP_1234,
'access_level': READ_ONLY,
}
ADD_RULE_USER = {
'access_type': USER,
'access_to': USERNAME,
'access_level': READ_WRITE,
}
DELETE_RULE_IP = {
'access_type': IP,
'access_to': EXPECTED_IP_1234,
'access_level': READ_WRITE,
}
DELETE_RULE_USER = {
'access_type': USER,
'access_to': USERNAME,
'access_level': READ_WRITE,
}
DELETE_RULE_IP_RO = {
'access_type': IP,
'access_to': EXPECTED_IP_1234,
'access_level': READ_ONLY,
}
GET_FSQUOTA = {'message': None,
'total': 1,
'members': [{'hardBlock': '1024', 'softBlock': '1024'}]}
EXPECTED_FSIP = {
'fspool': EXPECTED_FPG,
'vfs': EXPECTED_VFS,
'address': EXPECTED_IP_1234,
'prefixLen': EXPECTED_SUBNET,
'vlanTag': EXPECTED_VLAN_TAG,
}
OTHER_FSIP = {
'fspool': EXPECTED_FPG,
'vfs': EXPECTED_VFS,
'address': '9.9.9.9',
'prefixLen': EXPECTED_SUBNET,
'vlanTag': EXPECTED_VLAN_TAG,
}
NFS_SHARE_INFO = {
'project_id': EXPECTED_PROJECT_ID,
'id': EXPECTED_SHARE_ID,
'share_proto': NFS,
'export_location': EXPECTED_LOCATION,
'size': 1234,
'host': EXPECTED_HOST,
}
SNAPSHOT_INFO = {
'name': EXPECTED_SNAP_NAME,
'id': EXPECTED_SNAP_ID,
'share': {
'project_id': EXPECTED_PROJECT_ID,
'id': EXPECTED_SHARE_ID,
'share_proto': NFS,
'export_location': EXPECTED_LOCATION,
'host': EXPECTED_HOST,
},
}
SNAPSHOT_INSTANCE = {
'name': EXPECTED_SNAP_NAME,
'id': EXPECTED_SNAP_ID,
'share_id': EXPECTED_SHARE_ID,
'share_proto': NFS,
}
class FakeException(Exception):
pass
FAKE_EXCEPTION = FakeException("Fake exception for testing.")
| apache-2.0 |
nke001/attention-lvcsr | libs/blocks/tests/test_variable_filter.py | 2 | 3988 | from nose.tools import raises
from blocks.bricks import Bias, Linear, Logistic
from blocks.bricks.parallel import Merge
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import BIAS, FILTER, PARAMETER, OUTPUT
from theano import tensor
def test_variable_filter():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
brick2 = Bias(2, name='bias1')
activation = Logistic(name='sigm')
x = tensor.vector()
h1 = brick1.apply(x)
h2 = activation.apply(h1)
h2.name = "h2act"
y = brick2.apply(h2)
cg = ComputationGraph(y)
parameters = [brick1.W, brick1.b, brick2.parameters[0]]
bias = [brick1.b, brick2.parameters[0]]
brick1_bias = [brick1.b]
# Testing filtering by role
role_filter = VariableFilter(roles=[PARAMETER])
assert parameters == role_filter(cg.variables)
role_filter = VariableFilter(roles=[FILTER])
assert [] == role_filter(cg.variables)
# Testing filtering by role using each_role flag
role_filter = VariableFilter(roles=[PARAMETER, BIAS])
assert parameters == role_filter(cg.variables)
role_filter = VariableFilter(roles=[PARAMETER, BIAS], each_role=True)
assert not parameters == role_filter(cg.variables)
assert bias == role_filter(cg.variables)
# Testing filtering by bricks classes
brick_filter = VariableFilter(roles=[BIAS], bricks=[Linear])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by bricks instances
brick_filter = VariableFilter(roles=[BIAS], bricks=[brick1])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by brick instance
brick_filter = VariableFilter(roles=[BIAS], bricks=[brick1])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by name
name_filter = VariableFilter(name='W_norm')
assert [cg.variables[2]] == name_filter(cg.variables)
# Testing filtering by name regex
name_filter_regex = VariableFilter(name_regex='W_no.?m')
assert [cg.variables[2]] == name_filter_regex(cg.variables)
# Testing filtering by theano name
theano_name_filter = VariableFilter(theano_name='h2act')
assert [cg.variables[11]] == theano_name_filter(cg.variables)
# Testing filtering by theano name regex
theano_name_filter_regex = VariableFilter(theano_name_regex='h2a.?t')
assert [cg.variables[11]] == theano_name_filter_regex(cg.variables)
# Testing filtering by application
appli_filter = VariableFilter(applications=[brick1.apply])
variables = [cg.variables[1], cg.variables[8]]
assert variables == appli_filter(cg.variables)
# Testing filtering by application
appli_filter_list = VariableFilter(applications=[brick1.apply])
assert variables == appli_filter_list(cg.variables)
input1 = tensor.matrix('input1')
input2 = tensor.matrix('input2')
merge = Merge(['input1', 'input2'], [5, 6], 2)
merged = merge.apply(input1, input2)
merge_cg = ComputationGraph(merged)
outputs = VariableFilter(
roles=[OUTPUT], bricks=[merge])(merge_cg.variables)
assert merged in outputs
assert len(outputs) == 3
outputs_application = VariableFilter(
roles=[OUTPUT], applications=[merge.apply])(merge_cg.variables)
assert outputs_application == [merged]
@raises(TypeError)
def test_variable_filter_roles_error():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
h1 = brick1.apply(x)
cg = ComputationGraph(h1)
# testing role error
VariableFilter(roles=PARAMETER)(cg.variables)
@raises(TypeError)
def test_variable_filter_applications_error():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
h1 = brick1.apply(x)
cg = ComputationGraph(h1)
VariableFilter(applications=brick1.apply)(cg.variables)
| mit |
jpfeil/toil-scripts | src/toil_scripts/gatk_germline/hard_filter.py | 2 | 6773 | #!/usr/bin/env python2.7
import os
from toil.job import PromisedRequirement
from toil_lib.tools.variant_manipulation import gatk_select_variants, \
gatk_variant_filtration, gatk_combine_variants
from toil_scripts.gatk_germline.common import output_file_job
def hard_filter_pipeline(job, uuid, vcf_id, config):
"""
Runs GATK Hard Filtering on a Genomic VCF file and uploads the results.
0: Start
1: Select SNPs
2: Select INDELs
3: Apply SNP Filter
4: Apply INDEL Filter
5: Merge SNP and INDEL VCFs
6: Write filtered VCF to output directory
Job graph: 0 --> 1 --> 3 --> 5 --> 6, with a parallel branch 0 --> 2 --> 4 --> 5
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str vcf_id: VCF FileStoreID
:param Namespace config: Pipeline configuration options and shared files
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.snp_filter_name Name of SNP filter for VCF header
config.snp_filter_expression SNP JEXL filter expression
config.indel_filter_name Name of INDEL filter for VCF header
config.indel_filter_expression INDEL JEXL filter expression
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
:return: SNP and INDEL FileStoreIDs
:rtype: tuple
"""
job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid))
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# The SelectVariants disk requirement depends on the input VCF, the genome reference files,
# and the output VCF. The output VCF is smaller than the input VCF. The disk requirement
# is identical for SNPs and INDELs.
select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
vcf_id,
genome_ref_size)
select_snps = job.wrapJobFn(gatk_select_variants,
'SNP',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=select_variants_disk)
# The VariantFiltration disk requirement depends on the input VCF, the genome reference files,
# and the output VCF. The filtered VCF is smaller than the input VCF.
snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
select_snps.rv(),
genome_ref_size)
snp_filter = job.wrapJobFn(gatk_variant_filtration,
select_snps.rv(),
config.snp_filter_name,
config.snp_filter_expression,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=snp_filter_disk)
select_indels = job.wrapJobFn(gatk_select_variants,
'INDEL',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=select_variants_disk)
indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
select_indels.rv(),
genome_ref_size)
indel_filter = job.wrapJobFn(gatk_variant_filtration,
select_indels.rv(),
config.indel_filter_name,
config.indel_filter_expression,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=indel_filter_disk)
# The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the
# genome reference files. The combined VCF is approximately the same size as the input files.
combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size:
2 * (vcf1.size + vcf2.size) + ref_size,
indel_filter.rv(),
snp_filter.rv(),
genome_ref_size)
combine_vcfs = job.wrapJobFn(gatk_combine_variants,
{'SNPs': snp_filter.rv(), 'INDELs': indel_filter.rv()},
config.genome_fasta,
config.genome_fai,
config.genome_dict,
merge_option='UNSORTED', # Merges variants from a single sample
memory=config.xmx,
disk=combine_vcfs_disk)
job.addChild(select_snps)
job.addChild(select_indels)
select_snps.addChild(snp_filter)
snp_filter.addChild(combine_vcfs)
select_indels.addChild(indel_filter)
indel_filter.addChild(combine_vcfs)
# Output the hard filtered VCF
output_dir = os.path.join(config.output_dir, uuid)
output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix)
output_vcf = job.wrapJobFn(output_file_job,
output_filename,
combine_vcfs.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, combine_vcfs.rv()))
combine_vcfs.addChild(output_vcf)
return combine_vcfs.rv()
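# --- Editor's note: illustrative sketch, not part of the original toil-scripts
# source --- One way this job function might be wired into a Toil workflow;
# 'sample-uuid', vcf_id, config and options are placeholders the caller must
# supply, and Job/Job.Runner are assumed from the toil.job module of the same
# era as this script.
#
# from toil.job import Job
# root = Job.wrapJobFn(hard_filter_pipeline, 'sample-uuid', vcf_id, config)
# Job.Runner.startToil(root, options)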
| apache-2.0 |
djr7C4/aenea | client/aenea/dragonfly_mock.py | 6 | 1679 | # This file is part of Aenea
#
# Aenea is free software: you can redistribute it and/or modify it under
# the terms of version 3 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# Aenea is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Aenea. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (2014) Alex Roper
# Alex Roper <[email protected]>
'''Mock module to allow importing aenea on linux (mostly so you can run tests
locally).'''
print 'Unable to import Dragonfly (safe to ignore if running tests).'
class ActionBase(object):
def __init__(self, spec):
self.actions = self._parse_spec(spec)
def execute(self, data=None):
self._execute_events(self.actions)
class DynStrActionBase(ActionBase):
pass
class Context(object):
pass
DictList = lambda name: dict()
List = lambda name: list()
class _WindowInfo(object):
executable = None
title = None
handle = None
class Window(object):
@staticmethod
def get_foreground():
return _WindowInfo
class Repetition(object):
def __init__(self, child, min=1, max=None, name=None, default=None):
pass
class Choice(object):
def __init__(self, name, choices, extras=None, default=None):
pass
class AppContext(object):
def __init__(self, *a, **kw):
pass
def matches(self, *a, **kw):
return True
| lgpl-3.0 |
yoelk/kivy | kivy/tools/gles_compat/subset_gles.py | 54 | 4832 | '''
Common GLES Subset Extraction Script
====================================
In Kivy, our goal is to use OpenGL ES 2.0 (GLES2) for all drawing on all
platforms. The problem is that GLES2 is not a proper subset of any OpenGL
Desktop (GL) version prior to version 4.1.
However, to keep all our drawing cross-platform compatible, we're
restricting the Kivy drawing core to a real subset of GLES2 that is
available on all platforms.
This script therefore parses the GL and GL Extension (GLEXT) headers and
compares them with the GLES2 header. It then generates a header that only
contains symbols that are common to GLES2 and at least either GL or GLEXT.
However, since GLES2 doesn't support double values, we also need to do some
renaming, because functions in GL that took doubles as arguments now take
floats in GLES2, with their function name being suffixed with 'f'.
Furthermore, sometimes the pure symbol name doesn't match because there
might be an _EXT or _ARB or something akin to that at the end of a symbol
name. In that case, we take the symbol from the original header and add
a #define directive to redirect to that symbol from the symbol name without
extension.
'''
from __future__ import print_function
gl = open("/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/" +
"OpenGL.framework/Versions/A/Headers/gl.h", 'r')
glext = open("/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/" +
"OpenGL.framework/Versions/A/Headers/glext.h", 'r')
gles = open("gl2.h", 'r')
def add_defines_to_set(header):
symbols = []
lineno = 0
for line in header:
symbol = None
hexcode = None
lineno += 1
line = line.strip()
try:
elements = line.split()
if line.startswith("#define"):
symbol = elements[1]
for element in elements:
if element.startswith("0x"):
hexcode = element
elif line.startswith("typedef"):
symbol = elements[-1]
else:
for element in elements:
if element.startswith("gl"):
symbol = element
if symbol:
symbols.append((symbol, lineno, line, hexcode))
except Exception as e:
print('error:', lineno, ':', line)
print(e)
return symbols
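# --- Editor's note: illustrative sketch, not part of the original Kivy script ---
# add_defines_to_set() accepts any iterable of header lines, so it can be
# exercised without the SDK headers; the line below is a real GLES2 define:
#
# add_defines_to_set(['#define GL_RGB565 0x8D62'])
# # -> [('GL_RGB565', 1, '#define GL_RGB565 0x8D62', '0x8D62')]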
def extract_common_symbols(symbols1, symbols2, already_extracted):
for symbol1, lineno1, line1, hexcode1 in symbols1:
for symbol2, lineno2, line2, hexcode2 in symbols2:
if symbol1 in already_extracted or symbol2 in already_extracted:
continue
if symbol1 == symbol2 + 'f':
# There is no `double` type in GLES; Functions that were using
# a double were renamed with the suffix 'f'.
print("// Different Name; Redefine")
print(line2)
print("#define %s %s" % (symbol1, symbol2))
elif symbol1 == symbol2:
already_extracted.append(symbol1)
print(line1)
if symbol1 == 'GLclampf;':
# See explanation about doubles on GLES above.
print('typedef GLclampf GLclampd;')
elif hexcode1 and hexcode2 and hexcode1 == hexcode2:
already_extracted.append(symbol1)
already_extracted.append(symbol2)
print("// Different Name; Redefine")
print(line2)
print("#define %s %s" % (symbol1, symbol2))
# Generate ------------------------------------------------
# pipe to kivy/kivy/graphics/common_subset.h
gl_symbols = add_defines_to_set(gl)
glext_symbols = add_defines_to_set(glext)
gles_symbols = add_defines_to_set(gles)
print('// GLES 2.0 Header file, generated for Kivy')
print('// Check kivy/kivy/tools/gles_compat/subset_gles.py')
print('#pragma once')
print('#include "gl2platform.h"')
print('#ifdef __cplusplus')
print('extern "C" {')
print('#endif')
# Don't add the same symbol more than once
already_extracted = []
print('\n// Subset common to GLES and GL: ====================================')
extract_common_symbols(gles_symbols, gl_symbols, already_extracted)
print('\n// Subset common to GLES and GLEXT: =================================')
extract_common_symbols(gles_symbols, glext_symbols, already_extracted)
print()
print('// What follows was manually extracted from the GLES2 headers because')
print('// it was not present in any other header.', end=' ')
print('''
#define GL_SHADER_BINARY_FORMATS 0x8DF8
#define GL_RGB565 0x8D62
''')
print('#ifdef __cplusplus')
print('}')
print('#endif')
print()
| mit |
wsmith323/django | django/http/__init__.py | 341 | 1103 | from django.http.cookie import SimpleCookie, parse_cookie
from django.http.request import (
HttpRequest, QueryDict, RawPostDataException, UnreadablePostError,
)
from django.http.response import (
BadHeaderError, FileResponse, Http404, HttpResponse,
HttpResponseBadRequest, HttpResponseForbidden, HttpResponseGone,
HttpResponseNotAllowed, HttpResponseNotFound, HttpResponseNotModified,
HttpResponsePermanentRedirect, HttpResponseRedirect,
HttpResponseServerError, JsonResponse, StreamingHttpResponse,
)
from django.http.utils import conditional_content_removal
__all__ = [
'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict',
'RawPostDataException', 'UnreadablePostError',
'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect',
'HttpResponsePermanentRedirect', 'HttpResponseNotModified',
'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound',
'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError',
'Http404', 'BadHeaderError', 'JsonResponse', 'FileResponse',
'conditional_content_removal',
]
| bsd-3-clause |
leiferikb/bitpop | src/third_party/webpagereplay/third_party/ipaddr/ipaddr_test.py | 46 | 50368 | #!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import time
import ipaddr
# Compatibility function to cast str to bytes objects
if issubclass(ipaddr.Bytes, str):
_cb = ipaddr.Bytes
else:
_cb = lambda bytestr: bytes(bytestr, 'charmap')
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255')
self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64')
def tearDown(self):
del(self.ipv4)
del(self.ipv4_hostmask)
del(self.ipv6)
del(self)
def testRepr(self):
self.assertEqual("IPv4Network('1.2.3.4/32')",
repr(ipaddr.IPv4Network('1.2.3.4')))
self.assertEqual("IPv6Network('::1/128')",
repr(ipaddr.IPv6Network('::1')))
def testAutoMasking(self):
addr1 = ipaddr.IPv4Network('1.1.1.255/24')
addr1_masked = ipaddr.IPv4Network('1.1.1.0/24')
self.assertEqual(addr1_masked, addr1.masked())
addr2 = ipaddr.IPv6Network('2000:cafe::efac:100/96')
addr2_masked = ipaddr.IPv6Network('2000:cafe::/96')
self.assertEqual(addr2_masked, addr2.masked())
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddr.IPv4Address('1.1.1.1') + 255,
ipaddr.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddr.IPv4Address('1.1.1.1') - 256,
ipaddr.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddr.IPv6Address('::1') + (2**16 - 2),
ipaddr.IPv6Address('::ffff'))
self.assertEqual(ipaddr.IPv6Address('::ffff') - (2**16 - 2),
ipaddr.IPv6Address('::1'))
def testInvalidStrings(self):
def AssertInvalidIP(ip_str):
self.assertRaises(ValueError, ipaddr.IPAddress, ip_str)
AssertInvalidIP("")
AssertInvalidIP("016.016.016.016")
AssertInvalidIP("016.016.016")
AssertInvalidIP("016.016")
AssertInvalidIP("016")
AssertInvalidIP("000.000.000.000")
AssertInvalidIP("000")
AssertInvalidIP("0x0a.0x0a.0x0a.0x0a")
AssertInvalidIP("0x0a.0x0a.0x0a")
AssertInvalidIP("0x0a.0x0a")
AssertInvalidIP("0x0a")
AssertInvalidIP("42.42.42.42.42")
AssertInvalidIP("42.42.42")
AssertInvalidIP("42.42")
AssertInvalidIP("42")
AssertInvalidIP("42..42.42")
AssertInvalidIP("42..42.42.42")
AssertInvalidIP("42.42.42.42.")
AssertInvalidIP("42.42.42.42...")
AssertInvalidIP(".42.42.42.42")
AssertInvalidIP("...42.42.42.42")
AssertInvalidIP("42.42.42.-0")
AssertInvalidIP("42.42.42.+0")
AssertInvalidIP(".")
AssertInvalidIP("...")
AssertInvalidIP("bogus")
AssertInvalidIP("bogus.com")
AssertInvalidIP("192.168.0.1.com")
AssertInvalidIP("12345.67899.-54321.-98765")
AssertInvalidIP("257.0.0.0")
AssertInvalidIP("42.42.42.-42")
AssertInvalidIP("3ffe::1.net")
AssertInvalidIP("3ffe::1::1")
AssertInvalidIP("1::2::3::4:5")
AssertInvalidIP("::7:6:5:4:3:2:")
AssertInvalidIP(":6:5:4:3:2:1::")
AssertInvalidIP("2001::db:::1")
AssertInvalidIP("FEDC:9878")
AssertInvalidIP("+1.+2.+3.4")
AssertInvalidIP("1.2.3.4e0")
AssertInvalidIP("::7:6:5:4:3:2:1:0")
AssertInvalidIP("7:6:5:4:3:2:1:0::")
AssertInvalidIP("9:8:7:6:5:4:3::2:1")
AssertInvalidIP("0:1:2:3::4:5:6:7")
AssertInvalidIP("3ffe:0:0:0:0:0:0:0:1")
AssertInvalidIP("3ffe::10000")
AssertInvalidIP("3ffe::goog")
AssertInvalidIP("3ffe::-0")
AssertInvalidIP("3ffe::+0")
AssertInvalidIP("3ffe::-1")
AssertInvalidIP(":")
AssertInvalidIP(":::")
AssertInvalidIP("::1.2.3")
AssertInvalidIP("::1.2.3.4.5")
AssertInvalidIP("::1.2.3.4:")
AssertInvalidIP("1.2.3.4::")
AssertInvalidIP("2001:db8::1:")
AssertInvalidIP(":2001:db8::1")
AssertInvalidIP(":1:2:3:4:5:6:7")
AssertInvalidIP("1:2:3:4:5:6:7:")
AssertInvalidIP(":1:2:3:4:5:6:")
AssertInvalidIP("192.0.2.1/32")
AssertInvalidIP("2001:db8::1/128")
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'google.com')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'::1.2.3.4')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'google.com')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'1.2.3.4')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'cafe:cafe::/128/190')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'1234:axy::b')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'1234:axy::b')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'2001:db8:::1')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'2001:888888::1')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Address(1)._ip_int_from_string,
'1.a.2.3')
self.assertEqual(False, ipaddr.IPv4Network(1)._is_hostmask('1.a.2.3'))
def testGetNetwork(self):
self.assertEqual(int(self.ipv4.network), 16909056)
self.assertEqual(str(self.ipv4.network), '1.2.3.0')
self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0')
self.assertEqual(int(self.ipv6.network),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6.network),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6.hostmask),
'::ffff:ffff:ffff:ffff')
def testBadVersionComparison(self):
# These should always raise TypeError
v4addr = ipaddr.IPAddress('1.1.1.1')
v4net = ipaddr.IPNetwork('1.1.1.1')
v6addr = ipaddr.IPAddress('::1')
v6net = ipaddr.IPAddress('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
def testMixedTypeComparison(self):
v4addr = ipaddr.IPAddress('1.1.1.1')
v4net = ipaddr.IPNetwork('1.1.1.1/32')
v6addr = ipaddr.IPAddress('::1')
v6net = ipaddr.IPNetwork('::1/128')
self.assertFalse(v4net.__contains__(v6net))
self.assertFalse(v6net.__contains__(v4net))
self.assertRaises(TypeError, lambda: v4addr < v4net)
self.assertRaises(TypeError, lambda: v4addr > v4net)
self.assertRaises(TypeError, lambda: v4net < v4addr)
self.assertRaises(TypeError, lambda: v4net > v4addr)
self.assertRaises(TypeError, lambda: v6addr < v6net)
self.assertRaises(TypeError, lambda: v6addr > v6net)
self.assertRaises(TypeError, lambda: v6net < v6addr)
self.assertRaises(TypeError, lambda: v6net > v6addr)
# with get_mixed_type_key, you can sort addresses and network.
self.assertEqual([v4addr, v4net], sorted([v4net, v4addr],
key=ipaddr.get_mixed_type_key))
self.assertEqual([v6addr, v6net], sorted([v6net, v6addr],
key=ipaddr.get_mixed_type_key))
def testIpFromInt(self):
self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, 2**32)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, -1)
ipv4 = ipaddr.IPNetwork('1.2.3.4')
ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4)))
self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, 2**128)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, -1)
self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4)
self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6)
def testIpFromPacked(self):
ip = ipaddr.IPNetwork
self.assertEqual(self.ipv4.ip,
ip(_cb('\x01\x02\x03\x04')).ip)
self.assertEqual(ip('255.254.253.252'),
ip(_cb('\xff\xfe\xfd\xfc')))
self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 3))
self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 5))
self.assertEqual(self.ipv6.ip,
ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
'\x02\x00\x00\x00\x00\x00\x00\x01')).ip)
self.assertEqual(ip('ffff:2:3:4:ffff::'),
ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' +
'\xff\xff' + '\x00' * 6)))
self.assertEqual(ip('::'),
ip(_cb('\x00' * 16)))
self.assertRaises(ValueError, ip, _cb('\x00' * 15))
self.assertRaises(ValueError, ip, _cb('\x00' * 17))
def testGetIp(self):
self.assertEqual(int(self.ipv4.ip), 16909060)
self.assertEqual(str(self.ipv4.ip), '1.2.3.4')
self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1')
self.assertEqual(int(self.ipv6.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4.netmask), 4294967040L)
self.assertEqual(str(self.ipv4.netmask), '255.255.255.0')
self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0')
self.assertEqual(int(self.ipv6.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.netmask), 0)
self.assertTrue(ipv4_zero_netmask._is_valid_netmask(str(0)))
ipv6_zero_netmask = ipaddr.IPv6Network('::1/0')
self.assertEqual(int(ipv6_zero_netmask.netmask), 0)
self.assertTrue(ipv6_zero_netmask._is_valid_netmask(str(0)))
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4.broadcast), 16909311L)
self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255')
self.assertEqual(int(self.ipv6.broadcast),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6.broadcast),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4.prefixlen, 24)
self.assertEqual(self.ipv6.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0')
self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(),
ipaddr.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6.supernet().network),
'2001:658:22a:cafe::')
self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(),
ipaddr.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0')
self.assertEqual(self.ipv6.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6.supernet(3).network),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25)
self.assertEqual(self.ipv4.supernet(prefixlen_diff=2),
self.ipv4.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65)
self.assertEqual(self.ipv6.supernet(prefixlen_diff=2),
self.ipv6.supernet(new_prefix=62))
def testIterSubnets(self):
self.assertEqual(self.ipv4.subnet(), list(self.ipv4.iter_subnets()))
self.assertEqual(self.ipv6.subnet(), list(self.ipv6.iter_subnets()))
def testIterHosts(self):
self.assertEqual([ipaddr.IPv4Address('2.0.0.0'),
ipaddr.IPv4Address('2.0.0.1')],
list(ipaddr.IPNetwork('2.0.0.0/31').iterhosts()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)),
sorted(self.ipv4.subnet(new_prefix=27)))
self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23)
self.assertRaises(ValueError, self.ipv4.subnet,
prefixlen_diff=3, new_prefix=27)
self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)),
sorted(self.ipv6.subnet(new_prefix=68)))
self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63)
self.assertRaises(ValueError, self.ipv6.subnet,
prefixlen_diff=4, new_prefix=68)
def testGetSubnet(self):
self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25)
self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0')
self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128')
self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddr.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddr.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4.subnet(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6.subnet(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.subnet, 9)
self.assertRaises(ValueError, self.ipv6.subnet, 65)
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.supernet, 25)
self.assertRaises(ValueError, self.ipv6.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.subnet, -1)
self.assertRaises(ValueError, self.ipv6.subnet, -1)
def testGetNumHosts(self):
self.assertEqual(self.ipv4.numhosts, 256)
self.assertEqual(self.ipv4.subnet()[0].numhosts, 128)
self.assertEqual(self.ipv4.supernet().numhosts, 512)
self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808)
self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232)
def testContains(self):
self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4)
self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4)
self.assertTrue(self.ipv4 in self.ipv4)
self.assertTrue(self.ipv6 in self.ipv6)
        # We can test addresses and strings as well.
addr1 = ipaddr.IPv4Address('1.2.3.37')
self.assertTrue(addr1 in self.ipv4)
# issue 61, bad network comparison on like-ip'd network objects
# with identical broadcast addresses.
self.assertFalse(ipaddr.IPv4Network('1.1.0.0/16').__contains__(
ipaddr.IPv4Network('1.0.0.0/15')))
def testBadAddress(self):
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'poop')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '1.2.3.256')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'poopv6')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '1.2.3.4/32/24')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '10/8')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, '10/8')
def testBadNetMask(self):
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/33')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/254.254.255.256')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.1.1.1/240.255.0.0')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv6Network, '::1/')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv6Network, '::1/129')
def testNth(self):
self.assertEqual(str(self.ipv4[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
self.assertEqual(str(self.ipv6[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24'))
self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertFalse(self.ipv4 == '')
self.assertFalse(self.ipv4 == [])
self.assertFalse(self.ipv4 == 2)
self.assertTrue(ipaddr.IPNetwork('1.1.1.1/32') ==
ipaddr.IPAddress('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('1.1.1.1/24') ==
ipaddr.IPAddress('1.1.1.1'))
self.assertFalse(ipaddr.IPNetwork('1.1.1.0/24') ==
ipaddr.IPAddress('1.1.1.1'))
self.assertTrue(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertTrue(ipaddr.IPNetwork('::1/128') ==
ipaddr.IPAddress('::1'))
self.assertTrue(ipaddr.IPNetwork('::1/127') ==
ipaddr.IPAddress('::1'))
self.assertFalse(ipaddr.IPNetwork('::0/127') ==
ipaddr.IPAddress('::1'))
self.assertFalse(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv6 == '')
self.assertFalse(self.ipv6 == [])
self.assertFalse(self.ipv6 == 2)
def testNotEqual(self):
self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24'))
self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertTrue(self.ipv4 != '')
self.assertTrue(self.ipv4 != [])
self.assertTrue(self.ipv4 != 2)
addr2 = ipaddr.IPAddress('2001:658:22a:cafe:200::1')
self.assertFalse(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertTrue(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv6 != '')
self.assertTrue(self.ipv6 != [])
self.assertTrue(self.ipv6 != 2)
def testSlash32Constructor(self):
self.assertEqual(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')),
'1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEqual(str(ipaddr.IPv6Network('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEqual(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddr.IPv4Address('1.1.1.0')
ip2 = ipaddr.IPv4Address('1.1.1.1')
ip3 = ipaddr.IPv4Address('1.1.1.2')
ip4 = ipaddr.IPv4Address('1.1.1.3')
ip5 = ipaddr.IPv4Address('1.1.1.4')
ip6 = ipaddr.IPv4Address('1.1.1.0')
        # check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'),
ipaddr.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddr.IPv4Address('1.1.1.0')
ip2 = ipaddr.IPv4Address('1.1.1.1')
ip3 = ipaddr.IPv4Address('1.1.1.2')
ip4 = ipaddr.IPv4Address('1.1.1.3')
ip5 = ipaddr.IPv4Network('1.1.1.4/30')
ip6 = ipaddr.IPv4Network('1.1.1.4/30')
        # check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')])
# test only IP networks
ip1 = ipaddr.IPv4Network('1.1.0.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.0/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
ip4 = ipaddr.IPv4Network('1.1.3.0/24')
ip5 = ipaddr.IPv4Network('1.1.4.0/24')
# stored in no particular order b/c we want CollapseAddr to call [].sort
ip6 = ipaddr.IPv4Network('1.1.0.0/22')
        # check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'),
ipaddr.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddr.collapse_address_list([ip1, ip2])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32')
self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1')
self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
[ipaddr.IPNetwork('1.1.1.1/32')])
ip1 = ipaddr.IPv6Network('::2001:1/100')
ip2 = ipaddr.IPv6Network('::2002:1/120')
ip3 = ipaddr.IPv6Network('::2001:1/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3])
self.assertEqual(collapsed, [ip3])
        # the toejam test: collapsing mixed IPv4 and IPv6 input must raise TypeError
ip1 = ipaddr.IPAddress('1.1.1.1')
ip2 = ipaddr.IPAddress('::1')
self.assertRaises(TypeError, ipaddr.collapse_address_list,
[ip1, ip2])
def testSummarizing(self):
#ip = ipaddr.IPAddress
#ipnet = ipaddr.IPNetwork
summarize = ipaddr.summarize_address_range
ip1 = ipaddr.IPAddress('1.1.1.0')
ip2 = ipaddr.IPAddress('1.1.1.255')
        # test that a /24 is summarized properly
self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddr.IPAddress('1.1.1.8')
self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'),
ipaddr.IPNetwork('1.1.1.8')])
ip1 = ipaddr.IPAddress('1::')
ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test that an IPv6 range is summarized properly
self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddr.IPAddress('2::')
self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'),
ipaddr.IPNetwork('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'),
ipaddr.IPAddress('1.1.0.0'))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, summarize,
ipaddr.IPNetwork('1.1.1.0'),
ipaddr.IPNetwork('1.1.0.0'))
self.assertRaises(TypeError, summarize,
ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0'))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, summarize, ipaddr.IPAddress('::'),
ipaddr.IPNetwork('1.1.0.0'))
def testAddressComparison(self):
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.1'))
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.2'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddr.IPv4Network('1.1.1.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.1/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddr.IPv6Network('2001::2000/96')
ip2 = ipaddr.IPv6Network('2001::2001/96')
ip3 = ipaddr.IPv6Network('2001:ffff::2000/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
ipv6 = ipaddr.IPv6Network('::/0')
ipv4 = ipaddr.IPv4Network('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddr.IPNetwork('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddr.IPNetwork('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddr.IPNetwork('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddr.IPNetwork('10.10.10.0/31')
ip2 = ipaddr.IPNetwork('10.10.10.0')
ip3 = ipaddr.IPNetwork('10.10.10.2/31')
ip4 = ipaddr.IPNetwork('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertRaises(TypeError, ip1.__lt__, ipaddr.IPAddress('10.10.10.0'))
self.assertRaises(TypeError, ip2.__lt__, ipaddr.IPAddress('10.10.10.0'))
# <=, >=
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.2'))
self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2'))
self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddr.IPNetwork, '192.168.1.1/24',
strict=True)
self.assertRaises(ValueError, ipaddr.IPNetwork, '::1/120', strict=True)
def testOverlaps(self):
other = ipaddr.IPv4Network('1.2.3.0/30')
other2 = ipaddr.IPv4Network('1.2.2.0/24')
other3 = ipaddr.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4.overlaps(other))
self.assertFalse(self.ipv4.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddr.IPv4Network(ipv4_string)
v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string)
self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string)
self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'2001:1.1.1.1:1.1.1.1')
# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
# RFC4291 2.5.5.2
self.assertEqual(ipaddr.IPAddress('::FFFF:192.0.2.1'),
ipaddr.IPAddress('::FFFF:c000:201'))
# RFC4291 2.2 (part 3) x::d.d.d.d
self.assertEqual(ipaddr.IPAddress('FFFF::192.0.2.1'),
ipaddr.IPAddress('FFFF::c000:201'))
def testIPVersion(self):
self.assertEqual(self.ipv4.version, 4)
self.assertEqual(self.ipv6.version, 6)
def testMaxPrefixLength(self):
self.assertEqual(self.ipv4.max_prefixlen, 32)
self.assertEqual(self.ipv6.max_prefixlen, 128)
def testPacked(self):
self.assertEqual(self.ipv4.packed,
_cb('\x01\x02\x03\x04'))
self.assertEqual(ipaddr.IPv4Network('255.254.253.252').packed,
_cb('\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6.packed,
_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
'\x02\x00\x00\x00\x00\x00\x00\x01'))
self.assertEqual(ipaddr.IPv6Network('ffff:2:3:4:ffff::').packed,
_cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ '\x00' * 6))
self.assertEqual(ipaddr.IPv6Network('::1:0:0:0:0').packed,
_cb('\x00' * 6 + '\x00\x01' + '\x00' * 8))
def testIpStrFromPrefixlen(self):
ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
self.assertEqual(ipv4._ip_string_from_prefix(), '255.255.255.0')
self.assertEqual(ipv4._ip_string_from_prefix(28), '255.255.255.240')
def testIpType(self):
ipv4net = ipaddr.IPNetwork('1.2.3.4')
ipv4addr = ipaddr.IPAddress('1.2.3.4')
ipv6net = ipaddr.IPNetwork('::1.2.3.4')
ipv6addr = ipaddr.IPAddress('::1.2.3.4')
self.assertEqual(ipaddr.IPv4Network, type(ipv4net))
self.assertEqual(ipaddr.IPv4Address, type(ipv4addr))
self.assertEqual(ipaddr.IPv6Network, type(ipv6net))
self.assertEqual(ipaddr.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEqual(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast)
self.assertEqual(False, ipaddr.IPNetwork('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddr.IPNetwork('192.168.1.1/17').is_private)
self.assertEqual(False, ipaddr.IPNetwork('192.169.0.0').is_private)
self.assertEqual(True, ipaddr.IPNetwork('10.255.255.255').is_private)
self.assertEqual(False, ipaddr.IPNetwork('11.0.0.0').is_private)
self.assertEqual(True, ipaddr.IPNetwork('172.31.255.255').is_private)
self.assertEqual(False, ipaddr.IPNetwork('172.32.0.0').is_private)
self.assertEqual(True,
ipaddr.IPNetwork('169.254.100.200/24').is_link_local)
self.assertEqual(False,
ipaddr.IPNetwork('169.255.100.200/24').is_link_local)
self.assertEqual(True,
ipaddr.IPNetwork('127.100.200.254/32').is_loopback)
self.assertEqual(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback)
self.assertEqual(False, ipaddr.IPNetwork('128.0.0.0').is_loopback)
# test addresses
self.assertEqual(True, ipaddr.IPAddress('224.1.1.1').is_multicast)
self.assertEqual(False, ipaddr.IPAddress('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddr.IPAddress('192.168.1.1').is_private)
self.assertEqual(False, ipaddr.IPAddress('192.169.0.0').is_private)
self.assertEqual(True, ipaddr.IPAddress('10.255.255.255').is_private)
self.assertEqual(False, ipaddr.IPAddress('11.0.0.0').is_private)
self.assertEqual(True, ipaddr.IPAddress('172.31.255.255').is_private)
self.assertEqual(False, ipaddr.IPAddress('172.32.0.0').is_private)
self.assertEqual(True,
ipaddr.IPAddress('169.254.100.200').is_link_local)
self.assertEqual(False,
ipaddr.IPAddress('169.255.100.200').is_link_local)
self.assertEqual(True,
ipaddr.IPAddress('127.100.200.254').is_loopback)
self.assertEqual(True, ipaddr.IPAddress('127.42.0.0').is_loopback)
self.assertEqual(False, ipaddr.IPAddress('128.0.0.0').is_loopback)
self.assertEqual(True, ipaddr.IPNetwork('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
self.assertEqual(True, ipaddr.IPNetwork('ffff::').is_multicast)
self.assertEqual(True, ipaddr.IPNetwork(2**128-1).is_multicast)
self.assertEqual(True, ipaddr.IPNetwork('ff00::').is_multicast)
self.assertEqual(False, ipaddr.IPNetwork('fdff::').is_multicast)
self.assertEqual(True, ipaddr.IPNetwork('fecf::').is_site_local)
self.assertEqual(True, ipaddr.IPNetwork(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddr.IPNetwork('ff00::').is_site_local)
self.assertEqual(True, ipaddr.IPNetwork('fc00::').is_private)
self.assertEqual(True, ipaddr.IPNetwork(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddr.IPNetwork('fbff:ffff::').is_private)
self.assertEqual(False, ipaddr.IPNetwork('fe00::').is_private)
self.assertEqual(True, ipaddr.IPNetwork('fea0::').is_link_local)
self.assertEqual(True, ipaddr.IPNetwork('febf:ffff::').is_link_local)
self.assertEqual(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddr.IPNetwork('fec0::').is_link_local)
self.assertEqual(True, ipaddr.IPNetwork('0:0::0:01').is_loopback)
self.assertEqual(False, ipaddr.IPNetwork('::1/127').is_loopback)
self.assertEqual(False, ipaddr.IPNetwork('::').is_loopback)
self.assertEqual(False, ipaddr.IPNetwork('::2').is_loopback)
self.assertEqual(True, ipaddr.IPNetwork('0::0').is_unspecified)
self.assertEqual(False, ipaddr.IPNetwork('::1').is_unspecified)
self.assertEqual(False, ipaddr.IPNetwork('::/127').is_unspecified)
# test addresses
self.assertEqual(True, ipaddr.IPAddress('ffff::').is_multicast)
self.assertEqual(True, ipaddr.IPAddress(2**128-1).is_multicast)
self.assertEqual(True, ipaddr.IPAddress('ff00::').is_multicast)
self.assertEqual(False, ipaddr.IPAddress('fdff::').is_multicast)
self.assertEqual(True, ipaddr.IPAddress('fecf::').is_site_local)
self.assertEqual(True, ipaddr.IPAddress(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddr.IPAddress('fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddr.IPAddress('ff00::').is_site_local)
self.assertEqual(True, ipaddr.IPAddress('fc00::').is_private)
self.assertEqual(True, ipaddr.IPAddress(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddr.IPAddress('fbff:ffff::').is_private)
self.assertEqual(False, ipaddr.IPAddress('fe00::').is_private)
self.assertEqual(True, ipaddr.IPAddress('fea0::').is_link_local)
self.assertEqual(True, ipaddr.IPAddress('febf:ffff::').is_link_local)
self.assertEqual(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddr.IPAddress('fec0::').is_link_local)
self.assertEqual(True, ipaddr.IPAddress('0:0::0:01').is_loopback)
self.assertEqual(True, ipaddr.IPAddress('::1').is_loopback)
self.assertEqual(False, ipaddr.IPAddress('::2').is_loopback)
self.assertEqual(True, ipaddr.IPAddress('0::0').is_unspecified)
self.assertEqual(False, ipaddr.IPAddress('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEqual(True, ipaddr.IPAddress('100::').is_reserved)
self.assertEqual(True, ipaddr.IPNetwork('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped,
ipaddr.IPAddress('192.168.1.1'))
self.assertEqual(ipaddr.IPAddress('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped,
ipaddr.IPAddress('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddr.IPNetwork('10.1.1.0/24')
addr2 = ipaddr.IPNetwork('10.1.1.0/26')
addr3 = ipaddr.IPNetwork('10.2.1.0/24')
addr4 = ipaddr.IPAddress('10.1.1.0')
self.assertEqual(addr1.address_exclude(addr2),
[ipaddr.IPNetwork('10.1.1.64/26'),
ipaddr.IPNetwork('10.1.1.128/25')])
self.assertRaises(ValueError, addr1.address_exclude, addr3)
self.assertRaises(TypeError, addr1.address_exclude, addr4)
self.assertEqual(addr1.address_exclude(addr1), [])
def testHash(self):
self.assertEqual(hash(ipaddr.IPNetwork('10.1.1.0/24')),
hash(ipaddr.IPNetwork('10.1.1.0/24')))
self.assertEqual(hash(ipaddr.IPAddress('10.1.1.0')),
hash(ipaddr.IPAddress('10.1.1.0')))
# i70
self.assertEqual(hash(ipaddr.IPAddress('1.2.3.4')),
hash(ipaddr.IPAddress(
long(ipaddr.IPAddress('1.2.3.4')._ip))))
ip1 = ipaddr.IPAddress('10.1.1.0')
ip2 = ipaddr.IPAddress('1::')
dummy = {}
dummy[self.ipv4] = None
dummy[self.ipv6] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertTrue(self.ipv4 in dummy)
self.assertTrue(ip2 in dummy)
def testCopyConstructor(self):
addr1 = ipaddr.IPNetwork('10.1.1.0/24')
addr2 = ipaddr.IPNetwork(addr1)
addr3 = ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64')
addr4 = ipaddr.IPNetwork(addr3)
addr5 = ipaddr.IPv4Address('1.1.1.1')
addr6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddr.IPv4Address(addr5))
self.assertEqual(addr6, ipaddr.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in test_addresses.items():
self.assertEqual(compressed, str(ipaddr.IPv6Network(uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddr.IPv6Network('2001::1')
addr2 = ipaddr.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
addr1.exploded)
self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
ipaddr.IPv6Network('::1/128').exploded)
# issue 77
self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
addr2.exploded)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4))
self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6))
def testHexRepresentation(self):
self.assertEqual(hex(0x1020304),
hex(self.ipv4))
self.assertEqual(hex(0x20010658022ACAFE0200000000000001),
hex(self.ipv6))
# backwards compatibility
    def testBackwardsCompatibility(self):
self.assertEqual(ipaddr.CollapseAddrList(
[ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')]),
[ipaddr.IPNetwork('1.1.0.0/23')])
self.assertEqual(ipaddr.IPNetwork('::42:0/112').AddressExclude(
ipaddr.IPNetwork('::42:8000/113')),
[ipaddr.IPNetwork('::42:0/113')])
self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks(
ipaddr.IPNetwork('2::/9')) < 0)
self.assertEqual(ipaddr.IPNetwork('1::/16').Contains(
ipaddr.IPNetwork('2::/16')), False)
self.assertEqual(ipaddr.IPNetwork('0.0.0.0/0').Subnet(),
[ipaddr.IPNetwork('0.0.0.0/1'),
ipaddr.IPNetwork('128.0.0.0/1')])
self.assertEqual(ipaddr.IPNetwork('::/127').Subnet(),
[ipaddr.IPNetwork('::/128'),
ipaddr.IPNetwork('::1/128')])
self.assertEqual(ipaddr.IPNetwork('1.0.0.0/32').Supernet(),
ipaddr.IPNetwork('1.0.0.0/31'))
self.assertEqual(ipaddr.IPNetwork('::/121').Supernet(),
ipaddr.IPNetwork('::/120'))
self.assertEqual(ipaddr.IPNetwork('10.0.0.2').IsRFC1918(), True)
self.assertEqual(ipaddr.IPNetwork('10.0.0.0').IsMulticast(), False)
self.assertEqual(ipaddr.IPNetwork('127.255.255.255').IsLoopback(), True)
self.assertEqual(ipaddr.IPNetwork('169.255.255.255').IsLinkLocal(),
False)
def testForceVersion(self):
self.assertEqual(ipaddr.IPNetwork(1).version, 4)
self.assertEqual(ipaddr.IPNetwork(1, version=6).version, 6)
def testWithStar(self):
self.assertEqual(str(self.ipv4.with_prefixlen), "1.2.3.4/24")
self.assertEqual(str(self.ipv4.with_netmask), "1.2.3.4/255.255.255.0")
self.assertEqual(str(self.ipv4.with_hostmask), "1.2.3.4/0.0.0.255")
self.assertEqual(str(self.ipv6.with_prefixlen),
'2001:658:22a:cafe:200::1/64')
# rfc3513 sec 2.3 says that ipv6 only uses cidr notation for
# subnets
self.assertEqual(str(self.ipv6.with_netmask),
'2001:658:22a:cafe:200::1/64')
        # this probably doesn't make much sense, but it's included for
# compatibility with ipv4
self.assertEqual(str(self.ipv6.with_hostmask),
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertFalse(self.ipv4._cache.has_key('network'))
self.assertFalse(self.ipv4._cache.has_key('broadcast'))
self.assertFalse(self.ipv4._cache.has_key('hostmask'))
# V4 - populate and test
self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0'))
self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertTrue(self.ipv4._cache.has_key('network'))
self.assertTrue(self.ipv4._cache.has_key('broadcast'))
self.assertTrue(self.ipv4._cache.has_key('hostmask'))
# V6 - make sure we're empty
self.assertFalse(self.ipv6._cache.has_key('network'))
self.assertFalse(self.ipv6._cache.has_key('broadcast'))
self.assertFalse(self.ipv6._cache.has_key('hostmask'))
# V6 - populate and test
self.assertEqual(self.ipv6.network,
ipaddr.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address(
'2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6.hostmask,
ipaddr.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertTrue(self.ipv6._cache.has_key('network'))
self.assertTrue(self.ipv6._cache.has_key('broadcast'))
self.assertTrue(self.ipv6._cache.has_key('hostmask'))
def testTeredo(self):
# stolen from wikipedia
server = ipaddr.IPv4Address('65.54.227.120')
client = ipaddr.IPv4Address('192.0.2.45')
teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
self.assertEqual((server, client),
ipaddr.IPAddress(teredo_addr).teredo)
bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddr.IPAddress(bad_addr).teredo)
bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddr.IPAddress(bad_addr).teredo)
# i77
teredo_addr = ipaddr.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual((ipaddr.IPv4Address('94.245.121.253'),
ipaddr.IPv4Address('95.26.244.94')),
teredo_addr.teredo)
def testsixtofour(self):
sixtofouraddr = ipaddr.IPAddress('2002:ac1d:2d64::1')
bad_addr = ipaddr.IPAddress('2000:ac1d:2d64::1')
self.assertEqual(ipaddr.IPv4Address('172.29.45.100'),
sixtofouraddr.sixtofour)
self.assertFalse(bad_addr.sixtofour)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
zaragoza-sedeelectronica/hackathon-co.sa | node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/PRESUBMIT.py | 82 | 3232 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
# Needs style fix.
'pylib/gyp/generator/scons.py',
'pylib/gyp/generator/xcode.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
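  # For example, if the current year were 2012, years_re would expand to
  # '(2012|2011|2010|2009)', i.e. the most recent allowed year first.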
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=PYLINT_BLACKLIST,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac', 'gyp-android']
| apache-2.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.py | 126 | 2797 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose, assert_equal,
run_module_suite)
from scipy.linalg import polar, eigh
diag2 = np.array([[2, 0], [0, 3]])
a13 = np.array([[1, 2, 2]])
precomputed_cases = [
[[[0]], 'right', [[1]], [[0]]],
[[[0]], 'left', [[1]], [[0]]],
[[[9]], 'right', [[1]], [[9]]],
[[[9]], 'left', [[1]], [[9]]],
[diag2, 'right', np.eye(2), diag2],
[diag2, 'left', np.eye(2), diag2],
[a13, 'right', a13/norm(a13[0]), a13.T.dot(a13)/norm(a13[0])],
]
verify_cases = [
[[1, 2], [3, 4]],
[[1, 2, 3]],
[[1], [2], [3]],
[[1, 2, 3], [3, 4, 0]],
[[1, 2], [3, 4], [5, 5]],
[[1, 2], [3, 4+5j]],
[[1, 2, 3j]],
[[1], [2], [3j]],
[[1, 2, 3+2j], [3, 4-1j, -4j]],
[[1, 2], [3-2j, 4+0.5j], [5, 5]],
[[10000, 10, 1], [-1, 2, 3j], [0, 1, 2]],
]
def check_precomputed_polar(a, side, expected_u, expected_p):
# Compare the result of the polar decomposition to a
# precomputed result.
u, p = polar(a, side=side)
assert_allclose(u, expected_u, atol=1e-15)
assert_allclose(p, expected_p, atol=1e-15)
def verify_polar(a):
# Compute the polar decomposition, and then verify that
# the result has all the expected properties.
product_atol = np.sqrt(np.finfo(float).eps)
aa = np.asarray(a)
m, n = aa.shape
u, p = polar(a, side='right')
assert_equal(u.shape, (m, n))
assert_equal(p.shape, (n, n))
# a = up
assert_allclose(u.dot(p), a, atol=product_atol)
if m >= n:
assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
else:
assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
# p is Hermitian positive semidefinite.
assert_allclose(p.conj().T, p)
evals = eigh(p, eigvals_only=True)
nonzero_evals = evals[abs(evals) > 1e-14]
assert_((nonzero_evals >= 0).all())
u, p = polar(a, side='left')
assert_equal(u.shape, (m, n))
assert_equal(p.shape, (m, m))
# a = pu
assert_allclose(p.dot(u), a, atol=product_atol)
if m >= n:
assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
else:
assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
# p is Hermitian positive semidefinite.
assert_allclose(p.conj().T, p)
evals = eigh(p, eigvals_only=True)
nonzero_evals = evals[abs(evals) > 1e-14]
assert_((nonzero_evals >= 0).all())
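# Illustrative sketch (not part of the original test suite): the properties
# checked by verify_polar amount to, for example,
#     a = np.array([[1.0, 2.0], [3.0, 4.0]])
#     u, p = polar(a, side='right')
#     # then a equals u.dot(p) up to rounding, u is unitary and p is Hermitian PSD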
def test_precomputed_cases():
for a, side, expected_u, expected_p in precomputed_cases:
yield check_precomputed_polar, a, side, expected_u, expected_p
def test_verify_cases():
for a in verify_cases:
yield verify_polar, a
if __name__ == "__main__":
run_module_suite()
| mit |
kxliugang/edx-platform | common/djangoapps/track/views/__init__.py | 74 | 6212 | import datetime
import json
import pytz
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from track import tracker
from track import contexts
from track import shim
from track.models import TrackingLog
from eventtracking import tracker as eventtracker
def log_event(event):
"""Capture a event by sending it to the register trackers"""
tracker.send(event)
def _get_request_header(request, header_name, default=''):
"""Helper method to get header values from a request's META dict, if present."""
if request is not None and hasattr(request, 'META') and header_name in request.META:
return request.META[header_name]
else:
return default
def _get_request_value(request, value_name, default=''):
"""Helper method to get header values from a request's REQUEST dict, if present."""
if request is not None and hasattr(request, 'REQUEST') and value_name in request.REQUEST:
return request.REQUEST[value_name]
else:
return default
def user_track(request):
"""
    Log when a POST call to the "event" URL is made by a user. Uses request.REQUEST
    to allow for GET calls as well.
GET or POST call should provide "event_type", "event", and "page" arguments.
"""
try:
username = request.user.username
except:
username = "anonymous"
name = _get_request_value(request, 'event_type')
data = _get_request_value(request, 'event', {})
page = _get_request_value(request, 'page')
if isinstance(data, basestring) and len(data) > 0:
try:
data = json.loads(data)
except ValueError:
pass
context_override = contexts.course_context_from_url(page)
context_override['username'] = username
context_override['event_source'] = 'browser'
context_override['page'] = page
with eventtracker.get_tracker().context('edx.course.browser', context_override):
eventtracker.emit(name=name, data=data)
return HttpResponse('success')
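# Illustrative only (not part of the original module): a browser submission
# handled by user_track() above would typically POST form fields such as
#     event_type="play_video", event='{"id": "video_1"}', page="/courseware/..."
# where the exact values depend on the client-side instrumentation.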
def server_track(request, event_type, event, page=None):
"""
Log events related to server requests.
Handle the situation where the request may be NULL, as may happen with management commands.
"""
if event_type.startswith("/event_logs") and request.user.is_staff:
return # don't log
try:
username = request.user.username
except:
username = "anonymous"
# define output:
event = {
"username": username,
"ip": _get_request_header(request, 'REMOTE_ADDR'),
"referer": _get_request_header(request, 'HTTP_REFERER'),
"accept_language": _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE'),
"event_source": "server",
"event_type": event_type,
"event": event,
"agent": _get_request_header(request, 'HTTP_USER_AGENT'),
"page": page,
"time": datetime.datetime.utcnow(),
"host": _get_request_header(request, 'SERVER_NAME'),
"context": eventtracker.get_tracker().resolve_context(),
}
# Some duplicated fields are passed into event-tracking via the context by track.middleware.
# Remove them from the event here since they are captured elsewhere.
shim.remove_shim_context(event)
log_event(event)
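# Illustrative only: server-side callers typically pass the request path as the
# event type, e.g.
#     server_track(request, request.META['PATH_INFO'], {}, page=None)
# so that the request headers (IP, referer, user agent) end up in the event.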
def task_track(request_info, task_info, event_type, event, page=None):
"""
    Logs tracking information for events occurring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
with eventtracker.get_tracker().context('edx.course.task', contexts.course_context_from_url(page)):
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow(),
"host": request_info.get('host', 'unknown'),
"context": eventtracker.get_tracker().resolve_context(),
}
log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
username = ''
if args:
for arg in args.split('/'):
if arg.isdigit():
nlen = int(arg)
if arg.startswith('username='):
username = arg[9:]
record_instances = TrackingLog.objects.all().order_by('-time')
if username:
record_instances = record_instances.filter(username=username)
record_instances = record_instances[0:nlen]
# fix dtstamp
fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z"
for rinst in record_instances:
rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)
return render_to_response('tracking_log.html', {'records': record_instances})
| agpl-3.0 |
proversity-org/edx-platform | common/test/acceptance/fixtures/xqueue.py | 24 | 1434 | """
Fixture to configure XQueue response.
"""
import json
import requests
from common.test.acceptance.fixtures import XQUEUE_STUB_URL
class XQueueResponseFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class XQueueResponseFixture(object):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, pattern, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submission that contains the string
`pattern`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._pattern = pattern
self._response_dict = response_dict
def install(self):
"""
Configure the stub via HTTP.
"""
url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._pattern: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
if not response.ok:
raise XQueueResponseFixtureError(
"Could not configure XQueue stub for queue '{1}'. Status code: {2}".format(
self._pattern, self._response_dict))
| agpl-3.0 |
salceson/android-copernicus | server/server.py | 1 | 3360 | # coding=utf-8
import json
import socket
import struct
import thread
from flask import Flask, jsonify, request, abort
from gcm import *
from gcm.gcm import GCMException
app = Flask('Android-Copernicus-Server')
MCAST_GRP = '234.6.6.6'
MCAST_PORT = 3666
DEBUG = True
SERVER_IP = ''
PORT = 20666
ALARM_MODE = False
DEVICES = set()
API_KEY = ""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
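# The socket above joins the multicast group, so this process both sends
# commands to the devices (via send() below) and receives their status
# messages on the same group and port (in thread_func() further down).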
def send(command):
if DEBUG:
print 'SENT: "%s"' % command
sock.sendto(command, (MCAST_GRP, MCAST_PORT))
@app.route('/lights/', methods=['POST'])
def lights():
# noinspection PyBroadException
try:
data = json.loads(request.data)
except:
abort(400)
return
if DEBUG:
print data
if 'floor' not in data or 'room' not in data or 'operation' not in data:
abort(400)
return
msg = ";".join([str(data['floor']), str(data['room']), 'lamp', str(data['operation'])])
send(msg)
return jsonify({'status': 'OK'})
@app.route('/alarm/', methods=['GET'])
def get_alarm():
global ALARM_MODE
return jsonify({'alarm': 'on' if ALARM_MODE else 'off'})
@app.route('/alarm/', methods=['POST'])
def set_alarm():
global ALARM_MODE
# noinspection PyBroadException
try:
data = json.loads(request.data)
except:
abort(400)
return
if DEBUG:
print data
if "mode" not in data:
abort(400)
return
mode = data['mode']
if mode != 'on' and mode != 'off':
abort(400)
return
ALARM_MODE = (mode == 'on')
print ALARM_MODE
return jsonify({'status': 'OK'})
@app.route('/device/', methods=['POST'])
def register_device():
global DEVICES
# noinspection PyBroadException
try:
data = json.loads(request.data)
except:
abort(400)
return
if DEBUG:
print data
if "id" not in data:
abort(400)
return
DEVICES.add(data['id'])
return jsonify({'status': 'OK'})
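# Messages on the multicast group are semicolon-separated strings of the form
# "floor;room;device;operation", e.g. "1;2;lamp;on" (illustrative values).
# thread_func() below parses them and, when a motion sensor triggers while
# alarm mode is enabled, pushes a notification to every registered device via GCM.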
def thread_func():
global sock, ALARM_MODE, DEVICES, API_KEY
while True:
command = sock.recv(10240)
if DEBUG:
print command.split(';')
tab = command.split(';')
if len(tab) < 4:
continue
floor = tab[0]
room = tab[1]
device = tab[2]
operation = tab[3]
if device == 'motion' and operation == 'triggered' and ALARM_MODE:
for registration_id in DEVICES:
print registration_id
# noinspection PyBroadException
try:
gcm_connection = GCM(API_KEY)
data = {'status': 'alarm_triggered', 'floor': str(floor), 'room': str(room)}
gcm_connection.plaintext_request(registration_id=registration_id, data=data)
print "Done"
except GCMException as e:
print e
if __name__ == '__main__':
thread.start_new_thread(thread_func, ())
app.run(port=PORT, host=SERVER_IP)
| mit |
rupesh1mb/linux | Documentation/target/tcm_mod_builder.py | 337 | 24391 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_common.h>\n"
buf += "#include <scsi/scsi_proto.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
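# Illustrative invocation only (the module name below is a made-up example):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates drivers/target/tcm_nab5000/ with the configfs/fabric skeleton,
# a Kconfig and a Makefile, and then asks whether to reference the new module
# from drivers/target/Makefile and drivers/target/Kconfig.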
| gpl-2.0 |
srblum/hackathon-server | tests/utils.py | 4 | 3974 | """
Functions and utility classes for testing.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import StringIO
import functools
import humanize
import itertools
import os
import signal
import sys
import time
packageName = 'ga4gh'
def captureOutput(func, *args, **kwargs):
"""
Runs the specified function and arguments, and returns the
tuple (stdout, stderr) as strings.
"""
stdout = sys.stdout
sys.stdout = StringIO.StringIO()
stderr = sys.stderr
sys.stderr = StringIO.StringIO()
try:
func(*args, **kwargs)
stdoutOutput = sys.stdout.getvalue()
stderrOutput = sys.stderr.getvalue()
finally:
sys.stdout.close()
sys.stdout = stdout
sys.stderr.close()
sys.stderr = stderr
return stdoutOutput, stderrOutput
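# Hedged usage sketch (not part of the original module): wraps a noisy helper
# so its stdout/stderr can be inspected by a test. The helper below is made up.
def _exampleCaptureOutputUsage():
    def _noisy():
        print("to stdout")
        print("to stderr", file=sys.stderr)
    stdoutOutput, stderrOutput = captureOutput(_noisy)
    return stdoutOutput, stderrOutput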
def zipLists(*lists):
"""
Checks to see if all of the lists are the same length, and throws
an AssertionError otherwise. Returns the zipped lists.
"""
length = len(lists[0])
for list_ in lists[1:]:
assert len(list_) == length
return zip(*lists)
def getLinesFromLogFile(stream):
stream.flush()
stream.seek(0)
lines = stream.readlines()
return lines
def getProjectRootFilePath():
# assumes we're in a directory one level below the project root
return os.path.dirname(os.path.dirname(__file__))
def getGa4ghFilePath():
return os.path.join(getProjectRootFilePath(), packageName)
def powerset(iterable, maxSets=None):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
See https://docs.python.org/2/library/itertools.html#recipes
"""
s = list(iterable)
return itertools.islice(itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)),
0, maxSets)
# ---------------- Decorators ----------------
class TimeoutException(Exception):
"""
A process has taken too long to execute
"""
class Timed(object):
"""
Decorator that times a method, reporting runtime at finish
"""
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
self.start = time.time()
result = func(*args, **kwargs)
self.end = time.time()
self._report()
return result
return wrapper
def _report(self):
delta = self.end - self.start
timeString = humanize.time.naturaldelta(delta)
print("Finished in {} ({} seconds)".format(timeString, delta))
class Repeat(object):
"""
A decorator to use for repeating a tagged function.
The tagged function should return true if it wants to run again,
and false if it wants to stop repeating.
"""
defaultSleepSeconds = 0.1
def __init__(self, sleepSeconds=defaultSleepSeconds):
self.sleepSeconds = sleepSeconds
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
while func(*args, **kwargs):
time.sleep(self.sleepSeconds)
return wrapper
class Timeout(object):
"""
A decorator to use for only allowing a function to run
for a limited amount of time
"""
defaultTimeoutSeconds = 60
def __init__(self, timeoutSeconds=defaultTimeoutSeconds):
self.timeoutSeconds = timeoutSeconds
def __call__(self, func):
def _handle_timeout(signum, frame):
raise TimeoutException()
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
# set the alarm and execute func
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(self.timeoutSeconds)
result = func(*args, **kwargs)
finally:
# clear the alarm
signal.alarm(0)
return result
return wrapper
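# Illustrative sketch (not in the original file): combining the decorators so a
# polling helper repeats until a condition holds but gives up after 5 seconds.
# The flagHolder dict is a made-up stand-in for whatever state a test watches.
@Timeout(timeoutSeconds=5)
def _exampleWaitForFlag(flagHolder):
    @Repeat(sleepSeconds=0.05)
    def _poll():
        # keep repeating while the flag is still unset
        return not flagHolder.get('done', False)
    _poll()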
| apache-2.0 |
iceman126/strophejs_iceman126 | examples/attach/settings.py | 56 | 2916 | # Django settings for attach project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Some Body', '[email protected]'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '/path/to/attach.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Denver'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'asdf'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'attach.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/path/to/attach/templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'attach.attacher',
)
BOSH_SERVICE = 'http://example.com/xmpp-httpbind'
JABBERID = '[email protected]/bosh'
PASSWORD = 'juliet.is.hawt'
| mit |
linvictor88/vse-lbaas-driver | quantum/openstack/common/notifier/rpc_notifier.py | 7 | 1697 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from quantum.openstack.common import context as req_context
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for openstack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
"""Sends a notification via RPC"""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.notification_topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
| apache-2.0 |
Distrotech/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/storage/cookie.py | 89 | 5873 | from django.conf import settings
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import CompatCookie
from django.utils import simplejson as json
from django.utils.crypto import salted_hmac, constant_time_compare
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
message = [self.message_key, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
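# Illustrative sketch (not part of Django's original module): shows the compact
# list form produced for a single Message, e.g. '["__json_message",20,"Saved.","tag"]'
# for Message(constants.INFO, 'Saved.', 'tag').
def _example_encode_message():
    encoder = MessageEncoder(separators=(',', ':'))
    return encoder.encode(Message(constants.INFO, 'Saved.', 'tag'))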
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
return Message(*obj[1:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return dict([(key, self.process_messages(value))
for key, value in obj.iteritems()])
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# We should be able to store 4K in a cookie, but Internet Explorer
# imposes 4K as the *total* limit for a domain. To allow other
# cookies, we go for 3/4 of 4K.
max_cookie_size = 3072
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data)
else:
response.delete_cookie(self.cookie_name)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by CompatCookie, which
            # adds its own overhead, which we must account for.
cookie = CompatCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
def _decode(self, data):
"""
        Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
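# Hedged usage sketch (not part of Django's original module): storing a single
# message in the cookie of an outgoing response, assuming normal request and
# response objects from a view.
def _example_store_message(request, response):
    storage = CookieStorage(request)
    unstored = storage._store([Message(constants.INFO, 'Saved.')], response)
    return unstored  # empty list when everything fit within max_cookie_size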
| apache-2.0 |
tmenjo/cinder-2015.1.1 | cinder/tests/test_api_urlmap.py | 27 | 12302 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for cinder.api.urlmap.py
"""
from cinder.api import urlmap
from cinder import test
class TestParseFunctions(test.TestCase):
def test_unquote_header_value_without_quotes(self):
arg = 'TestString'
result = urlmap.unquote_header_value(arg)
self.assertEqual(result, arg)
def test_unquote_header_value_with_quotes(self):
result = urlmap.unquote_header_value('"TestString"')
self.assertEqual(result, 'TestString')
def test_parse_list_header(self):
arg = 'token, "quoted value"'
result = urlmap.parse_list_header(arg)
self.assertEqual(result, ['token', 'quoted value'])
def test_parse_options_header(self):
result = urlmap.parse_options_header('Content-Type: text/html;'
' mimetype=text/html')
self.assertEqual(result, ('Content-Type:', {'mimetype': 'text/html'}))
def test_parse_options_header_without_value(self):
result = urlmap.parse_options_header(None)
self.assertEqual(result, ('', {}))
class TestAccept(test.TestCase):
def test_best_match_ValueError(self):
arg = 'text/html; q=some_invalud_value'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']), (None, {}))
def test_best_match(self):
arg = '*/*; q=0.7, application/json; q=0.7, text/html; q=-0.8'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['application/json',
'application/xml', 'text/html']),
('application/json', {'q': '0.7'}))
def test_match_mask_one_asterisk(self):
arg = 'text/*; q=0.7'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']),
('text/html', {'q': '0.7'}))
def test_match_mask_two_asterisk(self):
arg = '*/*; q=0.7'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']),
('text/html', {'q': '0.7'}))
def test_match_mask_no_asterisk(self):
arg = 'application/json; q=0.7'
accept = urlmap.Accept(arg)
self.assertEqual(accept.best_match(['text/html']), (None, {}))
def test_content_type_params(self):
arg = "application/xml; q=0.1, application/json; q=0.2," \
" text/html; q=0.3"
accept = urlmap.Accept(arg)
self.assertEqual(accept.content_type_params('application/json'),
{'q': '0.2'})
def test_content_type_params_wrong_content_type(self):
arg = 'application/xml; q=0.1, text/html; q=0.1'
accept = urlmap.Accept(arg)
self.assertEqual(accept.content_type_params('application/json'), {})
class TestUrlMapFactory(test.TestCase):
def setUp(self):
super(TestUrlMapFactory, self).setUp()
self.global_conf = {'not_found_app': 'app_global',
'domain hoobar.com port 10 /': 'some_app_global'}
self.loader = self.mox.CreateMockAnything()
def test_not_found_app_in_local_conf(self):
local_conf = {'not_found_app': 'app_local',
'domain foobar.com port 20 /': 'some_app_local'}
self.loader.get_app('app_local', global_conf=self.global_conf).\
AndReturn('app_local_loader')
self.loader.get_app('some_app_local', global_conf=self.global_conf).\
AndReturn('some_app_loader')
self.mox.ReplayAll()
expected_urlmap = urlmap.URLMap(not_found_app='app_local_loader')
expected_urlmap['http://foobar.com:20'] = 'some_app_loader'
self.assertEqual(urlmap.urlmap_factory(self.loader, self.global_conf,
**local_conf), expected_urlmap)
def test_not_found_app_not_in_local_conf(self):
local_conf = {'domain foobar.com port 20 /': 'some_app_local'}
self.loader.get_app('app_global', global_conf=self.global_conf).\
AndReturn('app_global_loader')
self.loader.get_app('some_app_local', global_conf=self.global_conf).\
AndReturn('some_app_returned_by_loader')
self.mox.ReplayAll()
expected_urlmap = urlmap.URLMap(not_found_app='app_global_loader')
expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\
'_by_loader'
self.assertEqual(urlmap.urlmap_factory(self.loader, self.global_conf,
**local_conf), expected_urlmap)
def test_not_found_app_is_none(self):
local_conf = {'not_found_app': None,
'domain foobar.com port 20 /': 'some_app_local'}
self.loader.get_app('some_app_local', global_conf=self.global_conf).\
AndReturn('some_app_returned_by_loader')
self.mox.ReplayAll()
expected_urlmap = urlmap.URLMap(not_found_app=None)
expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\
'_by_loader'
self.assertEqual(urlmap.urlmap_factory(self.loader, self.global_conf,
**local_conf), expected_urlmap)
class TestURLMap(test.TestCase):
def setUp(self):
super(TestURLMap, self).setUp()
self.urlmap = urlmap.URLMap()
self.input_environ = {'HTTP_ACCEPT': "application/json;"
"version=9.0", 'REQUEST_METHOD': "GET",
'CONTENT_TYPE': 'application/xml',
'SCRIPT_NAME': '/scriptname',
'PATH_INFO': "/resource.xml"}
self.environ = {'HTTP_ACCEPT': "application/json;"
"version=9.0", 'REQUEST_METHOD': "GET",
'CONTENT_TYPE': 'application/xml',
'SCRIPT_NAME': '/scriptname/app_url',
'PATH_INFO': "/resource.xml"}
def test_match_with_applications(self):
self.urlmap[('http://10.20.30.40:50', '/path/somepath')] = 'app'
self.assertEqual(self.urlmap._match('20.30.40.50', '20',
'path/somepath'), (None, None))
def test_match_without_applications(self):
self.assertEqual(self.urlmap._match('host', 20, 'app_url/somepath'),
(None, None))
def test_match_path_info_equals_app_url(self):
self.urlmap[('http://20.30.40.50:60', '/app_url/somepath')] = 'app'
self.assertEqual(self.urlmap._match('http://20.30.40.50', '60',
'/app_url/somepath'),
('app', '/app_url/somepath'))
def test_match_path_info_equals_app_url_many_app(self):
self.urlmap[('http://20.30.40.50:60', '/path')] = 'app1'
self.urlmap[('http://20.30.40.50:60', '/path/somepath')] = 'app2'
self.urlmap[('http://20.30.40.50:60', '/path/somepath/elsepath')] = \
'app3'
self.assertEqual(self.urlmap._match('http://20.30.40.50', '60',
'/path/somepath/elsepath'),
('app3', '/path/somepath/elsepath'))
def test_set_script_name(self):
app = self.mox.CreateMockAnything()
start_response = self.mox.CreateMockAnything()
app.__call__(self.environ, start_response).AndReturn('value')
self.mox.ReplayAll()
wrap = self.urlmap._set_script_name(app, '/app_url')
self.assertEqual(wrap(self.input_environ, start_response), 'value')
def test_munge_path(self):
app = self.mox.CreateMockAnything()
start_response = self.mox.CreateMockAnything()
app.__call__(self.environ, start_response).AndReturn('value')
self.mox.ReplayAll()
wrap = self.urlmap._munge_path(app, '/app_url/resource.xml',
'/app_url')
self.assertEqual(wrap(self.input_environ, start_response), 'value')
def test_content_type_strategy_without_version(self):
self.assertEqual(self.urlmap._content_type_strategy('host', 20,
self.environ),
None)
def test_content_type_strategy_with_version(self):
environ = {'HTTP_ACCEPT': "application/vnd.openstack.melange+xml;"
"version=9.0", 'REQUEST_METHOD': "GET",
'PATH_INFO': "/resource.xml",
'CONTENT_TYPE': 'application/xml; version=2.0'}
self.urlmap[('http://10.20.30.40:50', '/v2.0')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_set_script_name')
self.urlmap._set_script_name('app', '/v2.0').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._content_type_strategy(
'http://10.20.30.40', '50', environ), 'value')
def test_path_strategy_wrong_path_info(self):
self.assertEqual(self.urlmap._path_strategy('http://10.20.30.40', '50',
'/resource'),
(None, None, None))
def test_path_strategy_mime_type_only(self):
self.assertEqual(self.urlmap._path_strategy('http://10.20.30.40', '50',
'/resource.xml'),
('application/xml', None, None))
def test_path_strategy(self):
self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_munge_path')
self.urlmap._munge_path('app', '/path/elsepath/resource.xml',
'/path/elsepath').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._path_strategy(
'http://10.20.30.40', '50', '/path/elsepath/resource.xml'),
('application/xml', 'value', '/path/elsepath'))
def test_path_strategy_wrong_mime_type(self):
self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_munge_path')
self.urlmap._munge_path('app', '/path/elsepath/resource.abc',
'/path/elsepath').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._path_strategy(
'http://10.20.30.40', '50', '/path/elsepath/resource.abc'),
(None, 'value', '/path/elsepath'))
def test_accept_strategy_version_not_in_params(self):
environ = {'HTTP_ACCEPT': "application/xml; q=0.1, application/json; "
"q=0.2", 'REQUEST_METHOD': "GET",
'PATH_INFO': "/resource.xml",
'CONTENT_TYPE': 'application/xml; version=2.0'}
self.assertEqual(self.urlmap._accept_strategy(
'http://10.20.30.40', '50', environ, ['application/xml']),
('application/xml', None))
def test_accept_strategy_version(self):
environ = {'HTTP_ACCEPT': "application/xml; q=0.1; version=1.0,"
"application/json; q=0.2; version=2.0",
'REQUEST_METHOD': "GET", 'PATH_INFO': "/resource.xml",
'CONTENT_TYPE': 'application/xml; version=2.0'}
self.urlmap[('http://10.20.30.40:50', '/v1.0')] = 'app'
self.mox.StubOutWithMock(self.urlmap, '_set_script_name')
self.urlmap._set_script_name('app', '/v1.0').AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(self.urlmap._accept_strategy(
'http://10.20.30.40', '50', environ, ['application/xml']),
('application/xml', 'value'))
| apache-2.0 |
Samael500/flask-security | flask_security/core.py | 1 | 16227 | # -*- coding: utf-8 -*-
"""
flask.ext.security.core
~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security core module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app, render_template
from flask.ext.login import AnonymousUserMixin, UserMixin as BaseUserMixin, \
LoginManager, current_user
from flask.ext.principal import Principal, RoleNeed, UserNeed, Identity, \
identity_loaded
from itsdangerous import URLSafeTimedSerializer
from passlib.context import CryptContext
from werkzeug.datastructures import ImmutableList
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from .utils import config_value as cv, get_config, md5, url_for_security, string_types
from .views import create_blueprint
from .forms import LoginForm, ConfirmRegisterForm, RegisterForm, \
ForgotPasswordForm, ChangePasswordForm, ResetPasswordForm, \
SendConfirmationForm, PasswordlessLoginForm
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
#: Default Flask-Security configuration
_default_config = {
'BLUEPRINT_NAME': 'security',
'URL_PREFIX': None,
'SUBDOMAIN': None,
'FLASH_MESSAGES': True,
'PASSWORD_HASH': 'plaintext',
'PASSWORD_SALT': None,
'LOGIN_URL': '/login',
'LOGOUT_URL': '/logout',
'REGISTER_URL': '/register',
'RESET_URL': '/reset',
'CHANGE_URL': '/change',
'CONFIRM_URL': '/confirm',
'POST_LOGIN_VIEW': '/',
'POST_LOGOUT_VIEW': '/',
'CONFIRM_ERROR_VIEW': None,
'POST_REGISTER_VIEW': None,
'POST_CONFIRM_VIEW': None,
'POST_RESET_VIEW': None,
'POST_CHANGE_VIEW': None,
'UNAUTHORIZED_VIEW': None,
'FORGOT_PASSWORD_TEMPLATE': 'security/forgot_password.html',
'LOGIN_USER_TEMPLATE': 'security/login_user.html',
'REGISTER_USER_TEMPLATE': 'security/register_user.html',
'RESET_PASSWORD_TEMPLATE': 'security/reset_password.html',
'CHANGE_PASSWORD_TEMPLATE': 'security/change_password.html',
'SEND_CONFIRMATION_TEMPLATE': 'security/send_confirmation.html',
'SEND_LOGIN_TEMPLATE': 'security/send_login.html',
'CONFIRMABLE': False,
'REGISTERABLE': False,
'RECOVERABLE': False,
'TRACKABLE': False,
'PASSWORDLESS': False,
'CHANGEABLE': False,
'SEND_REGISTER_EMAIL': True,
'SEND_PASSWORD_CHANGE_EMAIL': True,
'SEND_PASSWORD_RESET_NOTICE_EMAIL': True,
'LOGIN_WITHIN': '1 days',
'CONFIRM_EMAIL_WITHIN': '5 days',
'RESET_PASSWORD_WITHIN': '5 days',
'LOGIN_WITHOUT_CONFIRMATION': False,
'EMAIL_SENDER': 'no-reply@localhost',
'TOKEN_AUTHENTICATION_KEY': 'auth_token',
'TOKEN_AUTHENTICATION_HEADER': 'Authentication-Token',
'CONFIRM_SALT': 'confirm-salt',
'RESET_SALT': 'reset-salt',
'LOGIN_SALT': 'login-salt',
'CHANGE_SALT': 'change-salt',
'REMEMBER_SALT': 'remember-salt',
'DEFAULT_REMEMBER_ME': False,
'DEFAULT_HTTP_AUTH_REALM': 'Login Required',
'EMAIL_SUBJECT_REGISTER': 'Welcome',
'EMAIL_SUBJECT_CONFIRM': 'Please confirm your email',
'EMAIL_SUBJECT_PASSWORDLESS': 'Login instructions',
'EMAIL_SUBJECT_PASSWORD_NOTICE': 'Your password has been reset',
'EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE': 'Your password has been changed',
'EMAIL_SUBJECT_PASSWORD_RESET': 'Password reset instructions',
'USER_IDENTITY_ATTRIBUTES': ['email'],
'PASSWORD_SCHEMES': [
'bcrypt',
'des_crypt',
'pbkdf2_sha256',
'pbkdf2_sha512',
'sha256_crypt',
'sha512_crypt',
# And always last one...
'plaintext'
],
'DEPRECATED_PASSWORD_SCHEMES': ['auto']
}
#: Default Flask-Security messages
_default_messages = {
'UNAUTHORIZED': (
'You do not have permission to view this resource.', 'error'),
'CONFIRM_REGISTRATION': (
'Thank you. Confirmation instructions have been sent to %(email)s.', 'success'),
'EMAIL_CONFIRMED': (
'Thank you. Your email has been confirmed.', 'success'),
'ALREADY_CONFIRMED': (
'Your email has already been confirmed.', 'info'),
'INVALID_CONFIRMATION_TOKEN': (
'Invalid confirmation token.', 'error'),
'EMAIL_ALREADY_ASSOCIATED': (
'%(email)s is already associated with an account.', 'error'),
'PASSWORD_MISMATCH': (
'Password does not match', 'error'),
'RETYPE_PASSWORD_MISMATCH': (
'Passwords do not match', 'error'),
'INVALID_REDIRECT': (
'Redirections outside the domain are forbidden', 'error'),
'PASSWORD_RESET_REQUEST': (
'Instructions to reset your password have been sent to %(email)s.', 'info'),
'PASSWORD_RESET_EXPIRED': (
'You did not reset your password within %(within)s. New instructions have been sent '
'to %(email)s.', 'error'),
'INVALID_RESET_PASSWORD_TOKEN': (
'Invalid reset password token.', 'error'),
'CONFIRMATION_REQUIRED': (
'Email requires confirmation.', 'error'),
'CONFIRMATION_REQUEST': (
'Confirmation instructions have been sent to %(email)s.', 'info'),
'CONFIRMATION_EXPIRED': (
'You did not confirm your email within %(within)s. New instructions to confirm your email '
'have been sent to %(email)s.', 'error'),
'LOGIN_EXPIRED': (
'You did not login within %(within)s. New instructions to login have been sent to '
'%(email)s.', 'error'),
'LOGIN_EMAIL_SENT': (
'Instructions to login have been sent to %(email)s.', 'success'),
'INVALID_LOGIN_TOKEN': (
'Invalid login token.', 'error'),
'DISABLED_ACCOUNT': (
'Account is disabled.', 'error'),
'EMAIL_NOT_PROVIDED': (
'Email not provided', 'error'),
'INVALID_EMAIL_ADDRESS': (
'Invalid email address', 'error'),
'PASSWORD_NOT_PROVIDED': (
'Password not provided', 'error'),
'PASSWORD_NOT_SET': (
'No password is set for this user', 'error'),
'PASSWORD_INVALID_LENGTH': (
'Password must be at least 6 characters', 'error'),
'USER_DOES_NOT_EXIST': (
'Specified user does not exist', 'error'),
'INVALID_PASSWORD': (
'Invalid password', 'error'),
'PASSWORDLESS_LOGIN_SUCCESSFUL': (
        'You have successfully logged in.', 'success'),
'PASSWORD_RESET': (
'You successfully reset your password and you have been logged in automatically.',
'success'),
'PASSWORD_IS_THE_SAME': (
'Your new password must be different than your previous password.', 'error'),
'PASSWORD_CHANGE': (
'You successfully changed your password.', 'success'),
'LOGIN': (
'Please log in to access this page.', 'info'),
'REFRESH': (
'Please reauthenticate to access this page.', 'info'),
}
_default_field_labels = {
'EMAIL': 'Email Address',
'PASSWORD': 'Password',
'REMEMBER_ME': 'Remember Me',
'LOGIN': 'Login',
'RETYPE_PASSWORD': 'Retype Password',
'REGISTER': 'Register',
'SEND_CONFIRMATION': 'Resend Confirmation Instructions',
'RECOVER_PASSWORD': 'Recover Password',
'RESET_PASSWORD': 'Reset Password',
'NEW_PASSWORD': 'New Password',
'CHANGE_PASSWORD': 'Change Password',
'SEND_LOGIN_LINK': 'Send Login Link'
}
_allowed_password_hash_schemes = [
'bcrypt',
'des_crypt',
'pbkdf2_sha256',
'pbkdf2_sha512',
'sha256_crypt',
'sha512_crypt',
# And always last one...
'plaintext'
]
_default_forms = {
'login_form': LoginForm,
'confirm_register_form': ConfirmRegisterForm,
'register_form': RegisterForm,
'forgot_password_form': ForgotPasswordForm,
'reset_password_form': ResetPasswordForm,
'change_password_form': ChangePasswordForm,
'send_confirmation_form': SendConfirmationForm,
'passwordless_login_form': PasswordlessLoginForm,
}
def _user_loader(user_id):
return _security.datastore.find_user(id=user_id)
def _token_loader(token):
try:
data = _security.remember_token_serializer.loads(token)
user = _security.datastore.find_user(id=data[0])
if user and safe_str_cmp(md5(user.password), data[1]):
return user
except:
pass
return AnonymousUser()
def _identity_loader():
if not isinstance(current_user._get_current_object(), AnonymousUser):
identity = Identity(current_user.id)
return identity
def _on_identity_loaded(sender, identity):
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
for role in current_user.roles:
identity.provides.add(RoleNeed(role.name))
identity.user = current_user
def _get_login_manager(app):
lm = LoginManager()
lm.anonymous_user = AnonymousUser
lm.login_view = '%s.login' % cv('BLUEPRINT_NAME', app=app)
lm.user_loader(_user_loader)
lm.token_loader(_token_loader)
if cv('FLASH_MESSAGES', app=app):
lm.login_message, lm.login_message_category = cv('MSG_LOGIN', app=app)
lm.needs_refresh_message, lm.needs_refresh_message_category = cv('MSG_REFRESH', app=app)
else:
lm.login_message = None
lm.needs_refresh_message = None
lm.init_app(app)
return lm
def _get_principal(app):
p = Principal(app)
p.identity_loader(_identity_loader)
return p
def _get_pwd_context(app):
pw_hash = cv('PASSWORD_HASH', app=app)
schemes = cv('PASSWORD_SCHEMES', app=app)
deprecated = cv('DEPRECATED_PASSWORD_SCHEMES', app=app)
if pw_hash not in schemes:
allowed = (', '.join(schemes[:-1]) + ' and ' + schemes[-1])
raise ValueError("Invalid hash scheme %r. Allowed values are %s" % (pw_hash, allowed))
return CryptContext(schemes=schemes, default=pw_hash, deprecated=deprecated)
def _get_serializer(app, name):
secret_key = app.config.get('SECRET_KEY')
salt = app.config.get('SECURITY_%s_SALT' % name.upper())
return URLSafeTimedSerializer(secret_key=secret_key, salt=salt)
def _get_state(app, datastore, **kwargs):
for key, value in get_config(app).items():
kwargs[key.lower()] = value
kwargs.update(dict(
app=app,
datastore=datastore,
login_manager=_get_login_manager(app),
principal=_get_principal(app),
pwd_context=_get_pwd_context(app),
remember_token_serializer=_get_serializer(app, 'remember'),
login_serializer=_get_serializer(app, 'login'),
reset_serializer=_get_serializer(app, 'reset'),
confirm_serializer=_get_serializer(app, 'confirm'),
_context_processors={},
_send_mail_task=None
))
for key, value in _default_forms.items():
if key not in kwargs or not kwargs[key]:
kwargs[key] = value
return _SecurityState(**kwargs)
def _context_processor():
return dict(url_for_security=url_for_security, security=_security)
class RoleMixin(object):
"""Mixin for `Role` model definitions"""
def __eq__(self, other):
return (self.name == other or
self.name == getattr(other, 'name', None))
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
class UserMixin(BaseUserMixin):
"""Mixin for `User` model definitions"""
def is_active(self):
"""Returns `True` if the user is active."""
return self.active
def get_auth_token(self):
"""Returns the user's authentication token."""
data = [str(self.id), md5(self.password)]
return _security.remember_token_serializer.dumps(data)
def has_role(self, role):
"""Returns `True` if the user identifies with the specified role.
:param role: A role name or `Role` instance"""
if isinstance(role, string_types):
return role in (role.name for role in self.roles)
else:
return role in self.roles
class AnonymousUser(AnonymousUserMixin):
"""AnonymousUser definition"""
def __init__(self):
self.roles = ImmutableList()
def has_role(self, *args):
"""Returns `False`"""
return False
class _SecurityState(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key.lower(), value)
def _add_ctx_processor(self, endpoint, fn):
group = self._context_processors.setdefault(endpoint, [])
fn not in group and group.append(fn)
def _run_ctx_processor(self, endpoint):
rv = {}
for g in [None, endpoint]:
for fn in self._context_processors.setdefault(g, []):
rv.update(fn())
return rv
def context_processor(self, fn):
self._add_ctx_processor(None, fn)
def forgot_password_context_processor(self, fn):
self._add_ctx_processor('forgot_password', fn)
def login_context_processor(self, fn):
self._add_ctx_processor('login', fn)
def register_context_processor(self, fn):
self._add_ctx_processor('register', fn)
def reset_password_context_processor(self, fn):
self._add_ctx_processor('reset_password', fn)
def change_password_context_processor(self, fn):
self._add_ctx_processor('change_password', fn)
def send_confirmation_context_processor(self, fn):
self._add_ctx_processor('send_confirmation', fn)
def send_login_context_processor(self, fn):
self._add_ctx_processor('send_login', fn)
def mail_context_processor(self, fn):
self._add_ctx_processor('mail', fn)
def send_mail_task(self, fn):
self._send_mail_task = fn
class Security(object):
"""The :class:`Security` class initializes the Flask-Security extension.
:param app: The application.
:param datastore: An instance of a user datastore.
"""
def __init__(self, app=None, datastore=None, **kwargs):
self.app = app
self.datastore = datastore
if app is not None and datastore is not None:
self._state = self.init_app(app, datastore, **kwargs)
def init_app(self, app, datastore=None, register_blueprint=True,
login_form=None, confirm_register_form=None,
register_form=None, forgot_password_form=None,
reset_password_form=None, change_password_form=None,
send_confirmation_form=None, passwordless_login_form=None):
"""Initializes the Flask-Security extension for the specified
        application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
:param register_blueprint: to register the Security blueprint or not.
"""
datastore = datastore or self.datastore
for key, value in _default_config.items():
app.config.setdefault('SECURITY_' + key, value)
for key, value in _default_messages.items():
app.config.setdefault('SECURITY_MSG_' + key, value)
for key, value in _default_field_labels.items():
app.config.setdefault('SECURITY_LABEL_' + key, value)
identity_loaded.connect_via(app)(_on_identity_loaded)
state = _get_state(app, datastore,
login_form=login_form,
confirm_register_form=confirm_register_form,
register_form=register_form,
forgot_password_form=forgot_password_form,
reset_password_form=reset_password_form,
change_password_form=change_password_form,
send_confirmation_form=send_confirmation_form,
passwordless_login_form=passwordless_login_form)
if register_blueprint:
app.register_blueprint(create_blueprint(state, __name__))
app.context_processor(_context_processor)
state.render_template = self.render_template
app.extensions['security'] = state
return state
def render_template(self, *args, **kwargs):
return render_template(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._state, name, None)
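# Minimal wiring sketch (illustrative only, not part of this module). It assumes
# a Flask app plus a user datastore created elsewhere, for example:
#
#     from flask.ext.security import Security, SQLAlchemyUserDatastore
#     user_datastore = SQLAlchemyUserDatastore(db, User, Role)
#     security = Security(app, user_datastore)
#
# After init_app() the configured state is reachable as app.extensions['security'].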
| mit |
yawnosnorous/python-for-android | python3-alpha/extra_modules/gdata/calendar/data.py | 121 | 9852 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Calendar Data API"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
import atom.data
import gdata.acl.data
import gdata.data
import gdata.geo.data
import gdata.opensearch.data
GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005'
GCAL_TEMPLATE = '{%s}%%s' % GCAL_NAMESPACE
WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent')
class AccessLevelProperty(atom.core.XmlElement):
"""Describes how much a given user may do with an event or calendar"""
_qname = GCAL_TEMPLATE % 'accesslevel'
value = 'value'
class AllowGSync2Property(atom.core.XmlElement):
"""Whether the user is permitted to run Google Apps Sync"""
_qname = GCAL_TEMPLATE % 'allowGSync2'
value = 'value'
class AllowGSyncProperty(atom.core.XmlElement):
"""Whether the user is permitted to run Google Apps Sync"""
_qname = GCAL_TEMPLATE % 'allowGSync'
value = 'value'
class AnyoneCanAddSelfProperty(atom.core.XmlElement):
"""Whether anyone can add self as attendee"""
_qname = GCAL_TEMPLATE % 'anyoneCanAddSelf'
value = 'value'
class CalendarAclRole(gdata.acl.data.AclRole):
"""Describes the Calendar roles of an entry in the Calendar access control list"""
_qname = gdata.acl.data.GACL_TEMPLATE % 'role'
class CalendarCommentEntry(gdata.data.GDEntry):
"""Describes an entry in a feed of a Calendar event's comments"""
class CalendarCommentFeed(gdata.data.GDFeed):
"""Describes feed of a Calendar event's comments"""
entry = [CalendarCommentEntry]
class CalendarComments(gdata.data.Comments):
"""Describes a container of a feed link for Calendar comment entries"""
_qname = gdata.data.GD_TEMPLATE % 'comments'
class CalendarExtendedProperty(gdata.data.ExtendedProperty):
"""Defines a value for the realm attribute that is used only in the calendar API"""
_qname = gdata.data.GD_TEMPLATE % 'extendedProperty'
class CalendarWhere(gdata.data.Where):
"""Extends the base Where class with Calendar extensions"""
_qname = gdata.data.GD_TEMPLATE % 'where'
class ColorProperty(atom.core.XmlElement):
"""Describes the color of a calendar"""
_qname = GCAL_TEMPLATE % 'color'
value = 'value'
class GuestsCanInviteOthersProperty(atom.core.XmlElement):
"""Whether guests can invite others to the event"""
_qname = GCAL_TEMPLATE % 'guestsCanInviteOthers'
value = 'value'
class GuestsCanModifyProperty(atom.core.XmlElement):
"""Whether guests can modify event"""
_qname = GCAL_TEMPLATE % 'guestsCanModify'
value = 'value'
class GuestsCanSeeGuestsProperty(atom.core.XmlElement):
"""Whether guests can see other attendees"""
_qname = GCAL_TEMPLATE % 'guestsCanSeeGuests'
value = 'value'
class HiddenProperty(atom.core.XmlElement):
"""Describes whether a calendar is hidden"""
_qname = GCAL_TEMPLATE % 'hidden'
value = 'value'
class IcalUIDProperty(atom.core.XmlElement):
"""Describes the UID in the ical export of the event"""
_qname = GCAL_TEMPLATE % 'uid'
value = 'value'
class OverrideNameProperty(atom.core.XmlElement):
"""Describes the override name property of a calendar"""
_qname = GCAL_TEMPLATE % 'overridename'
value = 'value'
class PrivateCopyProperty(atom.core.XmlElement):
"""Indicates whether this is a private copy of the event, changes to which should not be sent to other calendars"""
_qname = GCAL_TEMPLATE % 'privateCopy'
value = 'value'
class QuickAddProperty(atom.core.XmlElement):
"""Describes whether gd:content is for quick-add processing"""
_qname = GCAL_TEMPLATE % 'quickadd'
value = 'value'
class ResourceProperty(atom.core.XmlElement):
"""Describes whether gd:who is a resource such as a conference room"""
_qname = GCAL_TEMPLATE % 'resource'
value = 'value'
id = 'id'
class EventWho(gdata.data.Who):
"""Extends the base Who class with Calendar extensions"""
_qname = gdata.data.GD_TEMPLATE % 'who'
resource = ResourceProperty
class SelectedProperty(atom.core.XmlElement):
"""Describes whether a calendar is selected"""
_qname = GCAL_TEMPLATE % 'selected'
value = 'value'
class SendAclNotificationsProperty(atom.core.XmlElement):
"""Describes whether to send ACL notifications to grantees"""
_qname = GCAL_TEMPLATE % 'sendAclNotifications'
value = 'value'
class CalendarAclEntry(gdata.acl.data.AclEntry):
"""Describes an entry in a feed of a Calendar access control list (ACL)"""
send_acl_notifications = SendAclNotificationsProperty
class CalendarAclFeed(gdata.data.GDFeed):
  """Describes a Calendar access control list (ACL) feed"""
entry = [CalendarAclEntry]
class SendEventNotificationsProperty(atom.core.XmlElement):
"""Describes whether to send event notifications to other participants of the event"""
_qname = GCAL_TEMPLATE % 'sendEventNotifications'
value = 'value'
class SequenceNumberProperty(atom.core.XmlElement):
"""Describes sequence number of an event"""
_qname = GCAL_TEMPLATE % 'sequence'
value = 'value'
class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry):
"""Describes an entry used by a Calendar recurrence exception entry link"""
uid = IcalUIDProperty
sequence = SequenceNumberProperty
class CalendarRecurrenceException(gdata.data.RecurrenceException):
"""Describes an exception to a recurring Calendar event"""
_qname = gdata.data.GD_TEMPLATE % 'recurrenceException'
class SettingsProperty(atom.core.XmlElement):
"""User preference name-value pair"""
_qname = GCAL_TEMPLATE % 'settingsProperty'
name = 'name'
value = 'value'
class SettingsEntry(gdata.data.GDEntry):
"""Describes a Calendar Settings property entry"""
settings_property = SettingsProperty
class CalendarSettingsFeed(gdata.data.GDFeed):
"""Personal settings for Calendar application"""
entry = [SettingsEntry]
class SuppressReplyNotificationsProperty(atom.core.XmlElement):
"""Lists notification methods to be suppressed for this reply"""
_qname = GCAL_TEMPLATE % 'suppressReplyNotifications'
methods = 'methods'
class SyncEventProperty(atom.core.XmlElement):
"""Describes whether this is a sync scenario where the Ical UID and Sequence number are honored during inserts and updates"""
_qname = GCAL_TEMPLATE % 'syncEvent'
value = 'value'
class When(gdata.data.When):
"""Extends the gd:when element to add reminders"""
reminder = [gdata.data.Reminder]
class CalendarEventEntry(gdata.data.BatchEntry):
"""Describes a Calendar event entry"""
quick_add = QuickAddProperty
send_event_notifications = SendEventNotificationsProperty
sync_event = SyncEventProperty
anyone_can_add_self = AnyoneCanAddSelfProperty
extended_property = [CalendarExtendedProperty]
sequence = SequenceNumberProperty
guests_can_invite_others = GuestsCanInviteOthersProperty
guests_can_modify = GuestsCanModifyProperty
guests_can_see_guests = GuestsCanSeeGuestsProperty
georss_where = gdata.geo.data.GeoRssWhere
private_copy = PrivateCopyProperty
suppress_reply_notifications = SuppressReplyNotificationsProperty
uid = IcalUIDProperty
where = [gdata.data.Where]
when = [When]
who = [gdata.data.Who]
transparency = gdata.data.Transparency
comments = gdata.data.Comments
event_status = gdata.data.EventStatus
visibility = gdata.data.Visibility
recurrence = gdata.data.Recurrence
recurrence_exception = [gdata.data.RecurrenceException]
original_event = gdata.data.OriginalEvent
reminder = [gdata.data.Reminder]
class TimeZoneProperty(atom.core.XmlElement):
"""Describes the time zone of a calendar"""
_qname = GCAL_TEMPLATE % 'timezone'
value = 'value'
class TimesCleanedProperty(atom.core.XmlElement):
"""Describes how many times calendar was cleaned via Manage Calendars"""
_qname = GCAL_TEMPLATE % 'timesCleaned'
value = 'value'
class CalendarEntry(gdata.data.GDEntry):
"""Describes a Calendar entry in the feed of a user's calendars"""
timezone = TimeZoneProperty
overridename = OverrideNameProperty
hidden = HiddenProperty
selected = SelectedProperty
times_cleaned = TimesCleanedProperty
color = ColorProperty
where = [CalendarWhere]
accesslevel = AccessLevelProperty
class CalendarEventFeed(gdata.data.BatchFeed):
"""Describes a Calendar event feed"""
allow_g_sync2 = AllowGSync2Property
timezone = TimeZoneProperty
entry = [CalendarEventEntry]
times_cleaned = TimesCleanedProperty
allow_g_sync = AllowGSyncProperty
class CalendarFeed(gdata.data.GDFeed):
"""Describes a feed of Calendars"""
entry = [CalendarEntry]
class WebContentGadgetPref(atom.core.XmlElement):
"""Describes a single web content gadget preference"""
_qname = GCAL_TEMPLATE % 'webContentGadgetPref'
name = 'name'
value = 'value'
class WebContent(atom.core.XmlElement):
"""Describes a "web content" extension"""
_qname = GCAL_TEMPLATE % 'webContent'
height = 'height'
width = 'width'
web_content_gadget_pref = [WebContentGadgetPref]
url = 'url'
display = 'display'
class WebContentLink(atom.data.Link):
"""Describes a "web content" link"""
def __init__(self, title=None, href=None, link_type=None,
web_content=None):
atom.data.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href,
link_type=link_type)
web_content = WebContent
| apache-2.0 |
xpavlus/domaintests | domtests.py | 1 | 4740 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
module: domaintests
Module's basic class.
author: Pavlovskiy Pavel
email: [email protected]
"""
try:
from HostClass import Host
from dnstestClass import dnsTest
except ImportError, err:
raise ImportError("Error importing DnsTests modules. Please check whether they are exist.")
class TestFail(Exception):
    # TODO: Analyze the telnet module and add dedicated exceptions for the most common failure cases
pass
class DomTest(object):
"""
    The basic class to be instantiated in order to use the domaintests module
"""
def __init__(self, domain = None, is_verbose=False):
"""
@param domain: string argument to set the domain name to be tested
@type domain: string
@param is_verbose: set it to True if you want to enable verbose output
@type is_verbose: bool
@returns: class domaintests.domtests.DomTest
"""
self.domain = Host(domain)
self.is_verbose = bool(is_verbose)
self.verb_pref = None
def telnetTest(self, port = 25, timeout = 15):
"""Tring to discover the hostname of the server which is hosted on the domain. Essence of this test is lie in the
Attempt to connect to the domain on the 25 port (or any other), reading a smtp session header, and searching
any domain in it afterwards
@param port: specifying an optional port number (by default - 25) on which connection will be established
@type port: integer
@param timeout: option parameter (by default 15 sec) specifies a timeout in seconds for interrupting attempting
to connect to the domain
@type timeout: integer
@return: string -- domain which was mentioned in the header of smtp session
@raise: TestFail in case if smtp header doesn't contain any domain
"""
import telnetlib
import sys
import re
self.verb_pref = "Telnet Test"
self.verbosing("Testing...")
exhaust, ret = "None", "None"
pat_telnet = re.compile(r".((?:[\w_-]+\.)+[a-z]{2,4}).*")
try:
tn = telnetlib.Telnet(self.domain.name, port)
            # Status 220 means that the connection was successful
exhaust = tn.read_until("220", timeout)
except:
print "With '%s' goes something wrong: %s" % (self.domain.name, sys.exc_info()[0])
else:
self.verbosing("Telnet has initiated without error...")
while exhaust != "" and ret == "None":
self.verbosing("exhaust is: %s" % exhaust)
if pat_telnet.match(exhaust):
self.verbosing("Pattern has matched...")
ret = pat_telnet.findall(exhaust)[0]
continue
else:
self.verbosing("Pattern hasn't matched...")
exhaust = tn.read_until("220", timeout)
if exhaust == "" and ret == "None":
raise TestFail("Unusual domain. Please try to execute command: \"telnet %s 25\" at hand" % self.domain.name)
finally:
if "tn" in locals():
tn.close()
self.verbosing("Result: '%s'"%ret)
return ret
def dnsTest(self, name_servers = None, timeout = 5):
"""
        This test tries to resolve the domain name to its IPs and then resolve each IP back to a domain afterwards
        @param name_servers: the list of name servers the query will be sent to
        @type name_servers: list or string
        @param timeout: timeout of the attempt to receive a reply from the server
        @type timeout: integer
        @return: list of strings -- list of domains
"""
self.verb_pref = "DNS Test"
d1 = dnsTest(self.domain.name, name_servers, timeout)
PTRs = []
self.verbosing("Whether it's a domain - %s" % d1.domain.isDomain())
if d1.domain.isDomain():
self.verbosing("IPs: %s" % d1.getIPs())
for ip in d1.getIPs():
self.verbosing("Current IP: %s" % ip)
Cip = dnsTest(ip)
reply = Cip.getPTR()
self.verbosing("Returned PTR: %s" % reply)
if reply not in PTRs:
PTRs.append(reply)
else:
PTRs.append(d1.getPTR())
self.verbosing("Whole list of PTRs: %s" % PTRs)
return PTRs
def verbosing(self, mess):
if self.is_verbose:
if self.verb_pref is None:
print(mess)
else:
print("[%s] %s" % (self.verb_pref, mess))
if __name__ == "__main__":
pass
| gpl-2.0 |
toringe/cc2asn | cc2asn-server.py | 1 | 11091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
CC2ASN Server
Basic query-response server listen on a tcp-port (default 43) for
incoming requests. The only valid and required request is an
ISO-3166-1 alpha-2 country code (e.g. NO for Norway). The server
will respond back with the list of registered AS-numbers.
Optionally a record type (IPv4, IPv6 or ALL) may be specified to
get prefixes instead of ASNs, or to get everything that is
registered for this country. Logs all system messages and client
queries to local syslog.
Author: Tor Inge Skaar
'''
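# Illustrative client sketch (host, port and country code below are placeholders
# for demonstration only, not values defined by this module):
#
#   import socket
#   s = socket.create_connection(('127.0.0.1', 43))
#   s.sendall('ALL NO')      # record type is optional; sending just 'NO' returns ASNs
#   print(s.recv(65536))
#   s.close()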
# Core modules
import os
import re
import sys
import pwd
import grp
import errno
import signal
import argparse
import datetime
import configobj
import threading
import SocketServer
import logging
from logging.handlers import SysLogHandler
# Each time a client connect, a new instance of this class is created.
class RequestHandler(SocketServer.BaseRequestHandler):
    # Handle the incoming request
def handle(self):
# Receive 8 bytes of data, and convert to uppercase
try:
sockdata = self.request.recv(8)
except IOError as e:
if e.errno == errno.ECONNRESET:
self.server.logger.warning('Connection reset by client')
return
else:
raise
if sockdata is not None:
sockdata = sockdata.strip().upper()
else:
self.server.logger.warning('No client data received')
return
# Client IP
client = self.client_address[0]
# First match cc search without rectype
ccmatch = re.match('^([A-Z]{2})$', sockdata)
if ccmatch is not None:
# Defaulting to ASN
rectype = 'ASN'
cc = ccmatch.group(1)
else:
# Check if record type is defined
recmatch = re.match('^(ALL|ASN|IPV4|IPV6) ([A-Z]{2})$', sockdata)
if recmatch is not None:
rectype = recmatch.group(1)
cc = recmatch.group(2)
else:
self.server.logger.error('Invalid query from ' + client +
': ' + str(sockdata))
return
# Construct path to file and send the contents to client
datafile = cc + '_' + rectype
datapath = os.path.join(self.server.config.get('DBDIR'), datafile)
if os.path.isfile(datapath) and os.access(datapath, os.R_OK):
with open(datapath, 'r') as data:
self.request.send(data.read())
self.logclient(client, rectype, cc)
else:
self.server.logger.warning('Client ' + client +
' queried for missing file: '+datapath)
return
# Log client requests
def logclient(self, ip, rectype, cc):
if self.server.clientlog is None:
# Use syslog
self.server.logger.info('Query: ' + ip + ' ' + rectype + ' ' + cc)
else:
# Use custom log
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log = open(self.server.clientlog, 'a')
log.write('{} {} {} {}\n'.format(now, ip, rectype, cc))
log.close()
# End class
# Change execution UID and GID
def drop_privileges(uid_name, gid_name):
# We're not root, so everythings fine then..
if os.getuid() != 0:
return
# Get the uid/gid from the name
try:
running_uid = pwd.getpwnam(uid_name).pw_uid
except KeyError:
e = 'Unable to drop privileges. No such user: {}'.format(uid_name)
logger.critical(e)
exit(e)
try:
running_gid = grp.getgrnam(gid_name).gr_gid
except KeyError:
e = 'Unable to drop privileges. No such group: {}'.format(gid_name)
logger.critical(e)
exit(e)
# Remove group privileges
os.setgroups([])
# Try setting the new uid/gid
os.setgid(running_gid)
os.setuid(running_uid)
# Ensure a very conservative umask
old_umask = os.umask(077)
# Run process as a daemon by double forking
def daemonize():
try:
pid = os.fork()
if pid > 0:
# Exit first parent
exit()
except OSError as e:
exit('Fork #1 failed: {} ({})'.format(e.errno, e.strerror))
os.chdir('/')
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
# Exit second parent
exit()
except OSError as e:
exit('Fork #2 failed: {} ({})'.format(e.errno, e.strerror))
# Handle user input
def parse_input():
parser = argparse.ArgumentParser(description='CC2ASN Server')
parser.add_argument('-c', dest='confpath', help='Path to config file')
parser.add_argument('-d', dest='daemon', action='store_true',
help='Daemonize server')
parser.add_argument('-l', dest='clientlog',
help='Log client requests to custom file')
parser.add_argument('-p', dest='pidfile', help='Path to PID file')
parser.add_argument('-V', action='version', version='CC2ASN Server v.1')
args = parser.parse_known_args()[0]
if args.confpath is None:
args.confpath = '/etc/default/cc2asn'
logger.info('No config file specified. Using {}'.format(args.confpath))
return args
# Create an empty file
def touch(filename, desc):
if os.path.isfile(filename) is True:
return
else:
try:
f = open(filename, 'w+')
f.close()
logger.info('{}: {}'.format(desc, filename))
except IOError as e:
errmsg = e.strerror + ': ' + filename
logger.critical(errmsg)
exit(errmsg)
# Create subdirectory for pid file.
# This enables deletion after we drop privileges.
def create_piddir(piddir, user, group):
# Create directory if needed
if os.path.exists(piddir) is False:
try:
os.mkdir(piddir)
except OSError as e:
logger.error('Failed to create directory: {}'.format(piddir))
# Change owner
try:
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(piddir, uid, gid)
except OSError as e:
logger.error('Failed to chown {}'.format(piddir))
# Create PID file and check/set permissions
def create_pidfile(pidfile, pid):
if os.path.isfile(pidfile) is False:
try:
f = open(pidfile, 'w+')
f.write(str(pid))
f.close()
logger.info('PID file created: {}'.format(pidfile))
except IOError as e:
logger.error('Failed to create pid file: {}'.format(pidfile))
else:
logger.warning('PID file already exists. Stale file?')
# Create signal handlers for the usual interrupts
def signal_handling():
logger.info('Installing signal handlers')
signal.signal(signal.SIGINT, cleanup)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGQUIT, cleanup)
# Cleanup process in separate thread
def cleanup(signal, frame):
logger.warning('Interrupted by {}'.format(signalname[signal]))
t = threading.Thread(target=shutdown_handler, args=(shutdown_event,))
t.start()
# Proper shutdown of socketserver
def shutdown_handler(event):
logger.info('Shutting down server')
# Cleanly shutdown server
try:
server.shutdown()
logger.info('Successful shutdown')
except Exception as e:
logger.error('Failed: {}'.format(e.strerror))
# Remove pid file
try:
# was config.get(pidfile)
pidfile = args.pidfile
logger.info('Removing PID file: {}'.format(pidfile))
os.remove(pidfile)
except OSError as e:
logger.warning('Failed to remove PID file. {}'.format(e.strerror))
# Tell thread that shutdown event is complete
event.set()
return
# Main execution
if __name__ == '__main__':
# Log to local syslog
logger = logging.getLogger('CC2ASN')
logger.setLevel(logging.INFO)
syslog = SysLogHandler(address='/dev/log')
formatter = logging.Formatter('%(name)s: <%(levelname)s> - %(message)s')
syslog.setFormatter(formatter)
logger.addHandler(syslog)
# Handle user input
args = parse_input()
# Create signal name lookup
signalname = dict((k, v) for v, k in
signal.__dict__.iteritems() if v.startswith('SIG'))
signal_handling()
# Load configuration
if os.access(args.confpath, os.R_OK):
config = configobj.ConfigObj(args.confpath)
else:
exit('Failed to read configuration file: {}'.format(args.confpath))
# Allow server to reuse a socket immediately after socket closure
SocketServer.TCPServer.allow_reuse_address = True
# Kill server thread when main thread terminates
SocketServer.TCPServer.daemon_threads = True
# Create a threaded TCP server, spawning separate threats for each client
listen = int(config.get('PORT'))
try:
server = SocketServer.ThreadingTCPServer(('', listen), RequestHandler)
(ip, port) = server.server_address
logger.info('Server bound to {}:{}'.format(ip, port))
except IOError as e:
if e.errno == 13:
            errmsg = 'Permission denied to bind port {}'.format(listen)
else:
errmsg = e.strerror
logger.critical(errmsg)
exit(errmsg)
# Share variables with server
server.clientlog = args.clientlog
server.config = config
server.logger = logger
if args.daemon is True:
# Get settings from config
user = config.get('RUNUSER')
group = config.get('RUNGROUP')
# Set default pid file if not specified
if args.pidfile is None:
args.pidfile = '/var/run/cc2asn/cc2asn-server.pid'
# Create subdirectory for pid file
create_piddir(os.path.dirname(args.pidfile), user, group)
# Drop root privileges
drop_privileges(user, group)
logger.info('Privileges dropped to {}:{}'.format(user, group))
# Daemonize
daemonize()
pid = os.getpid()
logger.info('Daemonized (pid {})'.format(pid))
# Create PID file
create_pidfile(args.pidfile, pid)
else:
logger.info('Server running in foreground (pid {})'
.format(os.getpid()))
# If custom log is set, create it if not exists
if args.clientlog is not None:
if os.path.isfile(args.clientlog) is False:
touch(args.clientlog, 'Client log')
else:
if os.access(args.clientlog, os.W_OK) is False:
errmsg = 'Unable to write to file: {}'.format(args.clientlog)
logger.critical(errmsg)
exit(errmsg)
# Create an event for the shutdown process to set
shutdown_event = threading.Event()
# Server must handle requests indefinitely until a shutdown is requested
server.serve_forever()
# Main thread will wait for shutdown to finish
shutdown_event.wait()
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/django/contrib/gis/db/backends/mysql/operations.py | 31 | 4263 | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import GeometryField, aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
Adapter = WKTAdapter
@cached_property
def geom_func_prefix(self):
return '' if self.is_mysql_5_5 else 'ST_'
@cached_property
def is_mysql_5_5(self):
return self.connection.mysql_version < (5, 6, 1)
@cached_property
def is_mysql_5_6(self):
return self.connection.mysql_version < (5, 7, 6)
@cached_property
def uses_invalid_empty_geometry_collection(self):
return self.connection.mysql_version >= (5, 7, 5)
@cached_property
def select(self):
return self.geom_func_prefix + 'AsText(%s)'
@cached_property
def from_wkb(self):
return self.geom_func_prefix + 'GeomFromWKB'
@cached_property
def from_text(self):
return self.geom_func_prefix + 'GeomFromText'
@cached_property
def gis_operators(self):
MBREquals = 'MBREqual' if self.is_mysql_5_6 else 'MBREquals'
return {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # ...
'contained': SpatialOperator(func='MBRWithin'), # ...
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func=MBREquals),
'exact': SpatialOperator(func=MBREquals),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func=MBREquals),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
@cached_property
def function_names(self):
return {'Length': 'GLength'} if self.is_mysql_5_5 else {}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'ForceRHR', 'GeoHash', 'IsValid', 'MakeValid', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'Transform', 'Translate',
}
if self.is_mysql_5_5:
unsupported.update({'Difference', 'Distance', 'Intersection', 'SymDifference', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
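    # For example (illustrative): with a plain geometry value the placeholder above
    # renders as 'ST_GeomFromText(%s)', or 'GeomFromText(%s)' when is_mysql_5_5 is True.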
def get_db_converters(self, expression):
converters = super(MySQLOperations, self).get_db_converters(expression)
if isinstance(expression.output_field, GeometryField) and self.uses_invalid_empty_geometry_collection:
converters.append(self.convert_invalid_empty_geometry_collection)
return converters
# https://dev.mysql.com/doc/refman/en/spatial-function-argument-handling.html
# MySQL 5.7.5 adds support for the empty geometry collections, but they are represented with invalid WKT.
def convert_invalid_empty_geometry_collection(self, value, expression, connection, context):
if value == b'GEOMETRYCOLLECTION()':
return b'GEOMETRYCOLLECTION EMPTY'
return value
| gpl-3.0 |
LoHChina/nova | nova/virt/hyperv/migrationops.py | 25 | 13148 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for migration / resize operations.
"""
import os
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova import exception
from nova.i18n import _, _LE
from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class MigrationOps(object):
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._imagecache = imagecache.ImageCache()
def _migrate_disk_files(self, instance_name, disk_files, dest):
# TODO(mikal): it would be nice if this method took a full instance,
# because it could then be passed to the log messages below.
same_host = False
if dest in self._hostutils.get_local_ips():
same_host = True
LOG.debug("Migration target is the source host")
else:
LOG.debug("Migration target host: %s", dest)
instance_path = self._pathutils.get_instance_dir(instance_name)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name, remove_dir=True, create_dir=True)
dest_path = None
try:
if same_host:
# Since source and target are the same, we copy the files to
# a temporary location before moving them into place
dest_path = '%s_tmp' % instance_path
if self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
self._pathutils.makedirs(dest_path)
else:
dest_path = self._pathutils.get_instance_dir(
instance_name, dest, remove_dir=True)
for disk_file in disk_files:
# Skip the config drive as the instance is already configured
if os.path.basename(disk_file).lower() != 'configdrive.vhd':
LOG.debug('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"',
{'disk_file': disk_file, 'dest_path': dest_path})
self._pathutils.copy(disk_file, dest_path)
self._pathutils.move_folder_files(instance_path, revert_path)
if same_host:
self._pathutils.move_folder_files(dest_path, instance_path)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_failed_disk_migration(instance_path, revert_path,
dest_path)
def _cleanup_failed_disk_migration(self, instance_path,
revert_path, dest_path):
try:
if dest_path and self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
if self._pathutils.exists(revert_path):
self._pathutils.rename(revert_path, instance_path)
except Exception as ex:
# Log and ignore this exception
LOG.exception(ex)
LOG.error(_LE("Cannot cleanup migration files"))
def _check_target_flavor(self, instance, flavor):
new_root_gb = flavor.root_gb
curr_root_gb = instance.root_gb
if new_root_gb < curr_root_gb:
raise exception.InstanceFaultRollback(
vmutils.VHDResizeException(
_("Cannot resize the root disk to a smaller size. "
"Current size: %(curr_root_gb)s GB. Requested size: "
"%(new_root_gb)s GB") %
{'curr_root_gb': curr_root_gb,
'new_root_gb': new_root_gb}))
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None, timeout=0,
retry_interval=0):
LOG.debug("migrate_disk_and_power_off called", instance=instance)
self._check_target_flavor(instance, flavor)
self._vmops.power_off(instance, timeout, retry_interval)
(disk_files,
volume_drives) = self._vmutils.get_vm_storage_paths(instance.name)
if disk_files:
self._migrate_disk_files(instance.name, disk_files, dest)
self._vmops.destroy(instance, destroy_disks=False)
# disk_info is not used
return ""
def confirm_migration(self, migration, instance, network_info):
LOG.debug("confirm_migration called", instance=instance)
self._pathutils.get_instance_migr_revert_dir(instance.name,
remove_dir=True)
def _revert_migration_files(self, instance_name):
instance_path = self._pathutils.get_instance_dir(
instance_name, create_dir=False, remove_dir=True)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name)
self._pathutils.rename(revert_path, instance_path)
def _check_and_attach_config_drive(self, instance, vm_gen):
if configdrive.required_by(instance):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path:
self._vmops.attach_config_drive(instance, configdrive_path,
vm_gen)
else:
raise vmutils.HyperVException(
_("Config drive is required by instance: %s, "
"but it does not exist.") % instance.name)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("finish_revert_migration called", instance=instance)
instance_name = instance.name
self._revert_migration_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
image_meta = self._imagecache.get_image_details(context, instance)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
os.path.basename(base_vhd_path))
try:
LOG.debug('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s',
{'base_vhd_path': base_vhd_path,
'base_vhd_copy_path': base_vhd_copy_path})
self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_copy_path)
LOG.debug("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
# Replace the differential VHD with the merged one
self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_copy_path):
self._pathutils.remove(base_vhd_copy_path)
def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
curr_size = vhd_info['MaxInternalSize']
if new_size < curr_size:
raise vmutils.VHDResizeException(_("Cannot resize a VHD "
"to a smaller size"))
elif new_size > curr_size:
self._resize_vhd(vhd_path, new_size)
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug("Getting parent disk info for disk: %s", vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s",
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
def _check_base_disk(self, context, instance, diff_vhd_path,
src_base_disk_path):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
# If the location of the base host differs between source
# and target hosts we need to reconnect the base disk
if src_base_disk_path.lower() != base_vhd_path.lower():
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
LOG.debug("finish_migration called", instance=instance)
instance_name = instance.name
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
if not root_vhd_path:
raise vmutils.HyperVException(_("Cannot find boot VHD "
"file for instance: %s") %
instance_name)
root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
src_base_disk_path = root_vhd_info.get("ParentPath")
if src_base_disk_path:
self._check_base_disk(context, instance, root_vhd_path,
src_base_disk_path)
if resize_instance:
new_size = instance.root_gb * units.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * units.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)
else:
eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path)
self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
| apache-2.0 |
platformio/platformio | platformio/commands/settings.py | 1 | 2481 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from tabulate import tabulate
from platformio import app
from platformio.compat import string_types
def format_value(raw):
if isinstance(raw, bool):
return "Yes" if raw else "No"
if isinstance(raw, string_types):
return raw
return str(raw)
@click.group(short_help="Manage PlatformIO settings")
def cli():
pass
@cli.command("get", short_help="Get existing setting/-s")
@click.argument("name", required=False)
def settings_get(name):
tabular_data = []
for key, options in sorted(app.DEFAULT_SETTINGS.items()):
if name and name != key:
continue
raw_value = app.get_setting(key)
formatted_value = format_value(raw_value)
if raw_value != options["value"]:
default_formatted_value = format_value(options["value"])
formatted_value += "%s" % (
"\n" if len(default_formatted_value) > 10 else " "
)
formatted_value += "[%s]" % click.style(
default_formatted_value, fg="yellow"
)
tabular_data.append(
(click.style(key, fg="cyan"), formatted_value, options["description"])
)
click.echo(
tabulate(
tabular_data, headers=["Name", "Current value [Default]", "Description"]
)
)
@cli.command("set", short_help="Set new value for the setting")
@click.argument("name")
@click.argument("value")
@click.pass_context
def settings_set(ctx, name, value):
app.set_setting(name, value)
click.secho("The new value for the setting has been set!", fg="green")
ctx.invoke(settings_get, name=name)
@cli.command("reset", short_help="Reset settings to default")
@click.pass_context
def settings_reset(ctx):
app.reset_settings()
click.secho("The settings have been reseted!", fg="green")
ctx.invoke(settings_get)
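# Illustrative CLI usage (assuming this command group is exposed as "settings"
# in the PlatformIO CLI; <name> and <value> are placeholders):
#
#   pio settings get                  # list all settings
#   pio settings get <name>           # show a single setting
#   pio settings set <name> <value>   # change a setting
#   pio settings reset                # restore defaults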
| apache-2.0 |
unaizalakain/django | django/contrib/messages/api.py | 512 | 3202 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app.
"""
if not isinstance(request, HttpRequest):
raise TypeError("add_message() argument must be an HttpRequest object, "
"not '%s'." % request.__class__.__name__)
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags)
if not fail_silently:
raise MessageFailure('You cannot add messages without installing '
'django.contrib.messages.middleware.MessageMiddleware')
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
an empty list.
"""
if hasattr(request, '_messages'):
return request._messages
else:
return []
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
if hasattr(request, '_messages'):
storage = request._messages
else:
storage = default_storage(request)
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
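# Illustrative usage sketch (the view name and message text are examples only):
#
#   from django.contrib import messages
#
#   def profile_view(request):
#       ...
#       messages.success(request, 'Profile updated.')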
| bsd-3-clause |
alertby/mbed | workspace_tools/patch.py | 142 | 1934 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
http://www.nxp.com/documents/user_manual/UM10360.pdf
32.3.1.1 Criterion for Valid User Code
The reserved Cortex-M3 exception vector location 7 (offset 0x1C in the vector table)
should contain the 2's complement of the check-sum of table entries 0 through 6. This
causes the checksum of the first 8 table entries to be 0. The boot loader code checksums
the first 8 locations in sector 0 of the flash. If the result is 0, then execution control is
transferred to the user code.
"""
from struct import unpack, pack
def patch(bin_path):
with open(bin_path, 'r+b') as bin:
# Read entries 0 through 6 (Little Endian 32bits words)
vector = [unpack('<I', bin.read(4))[0] for _ in range(7)]
# location 7 (offset 0x1C in the vector table) should contain the 2's
# complement of the check-sum of table entries 0 through 6
bin.seek(0x1C)
bin.write(pack('<I', (~sum(vector) + 1) & 0xFFFFFFFF))
def is_patched(bin_path):
with open(bin_path, 'rb') as bin:
# The checksum of the first 8 table entries should be 0
return (sum([unpack('<I', bin.read(4))[0] for _ in range(8)]) & 0xFFFFFFFF) == 0
if __name__ == '__main__':
bin_path = "C:/Users/emimon01/releases/emilmont/build/test/LPC1768/ARM/MBED_A1/basic.bin"
patch(bin_path)
assert is_patched(bin_path), "The file is not patched"
| apache-2.0 |
miky-kr5/Super-HUGS-Revolution-98 | game.py | 2 | 2943 | ###########################################
# Created on 1-7-2013. Miguel Angel Astor #
###########################################
import pygame
try:
import android
except ImportError:
android = None
import player
from state import VALID_STATES
from intro import IntroState
from menu import MenuState
from ingame import InGameState
from score import ScoreState
from constants import DEBUG
# The Game class implements the state machine of the game and
# runs the main game loop.
class Game:
def __init__(self, screen):
""" Sets the rendering canvas and the intial state. """
self.canvas = screen
self.current_state = VALID_STATES['INTRO']
self.done = False
self.clock = pygame.time.Clock()
# Initialize the different game states.
intro = IntroState()
menu = MenuState()
in_game = InGameState()
score = ScoreState()
# Create a states list.
self.state_vector = [intro, menu, in_game, score]
def get_state(self):
""" Returns the current state of the game. """
if self.current_state == VALID_STATES['INTRO']:
return "INTRO"
elif self.current_state == VALID_STATES['MENU']:
return "MENU"
elif self.current_state == VALID_STATES['IN_GAME']:
return "IN_GAME"
elif self.current_state == VALID_STATES['SCORE']:
return "SCORE"
elif self.current_state == VALID_STATES['QUIT']:
return "QUIT"
else:
return "NOT_VALID"
def game_loop(self):
""" The main game loop. """
while not self.done:
# Get user input first.
self.state_vector[self.current_state].input()
# Then update the game state.
transition = self.state_vector[self.current_state].update()
# Check if a state transition is required.
if transition != VALID_STATES['STAY']:
if transition == VALID_STATES['INTRO']:
self.current_state = VALID_STATES['INTRO']
elif transition == VALID_STATES['MENU']:
self.current_state = VALID_STATES['MENU']
elif transition == VALID_STATES['IN_GAME']:
self.current_state = VALID_STATES['IN_GAME']
elif transition == VALID_STATES['SCORE']:
self.current_state = VALID_STATES['SCORE']
elif transition == VALID_STATES['QUIT']:
self.done = True
self.current_state = VALID_STATES['QUIT']
else:
self.current_state = VALID_STATES['NOT_VALID']
if DEBUG:
print self.get_state()
# If the game is not over, render the current state.
if not self.done:
self.state_vector[self.current_state].render(self.canvas)
pygame.display.update()
self.clock.tick(60)
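# Illustrative entry point (a sketch; the display size and pygame setup are assumptions):
#
#   pygame.init()
#   screen = pygame.display.set_mode((1280, 720))
#   Game(screen).game_loop()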
| bsd-2-clause |
jledet/linux-xlnx | tools/perf/scripts/python/stat-cpi.py | 404 | 2391 | #!/usr/bin/env python
data = {}
times = []
threads = []
cpus = []
def get_key(time, event, cpu, thread):
return "%d-%s-%d-%d" % (time, event, cpu, thread)
def store_key(time, cpu, thread):
if (time not in times):
times.append(time)
if (cpu not in cpus):
cpus.append(cpu)
if (thread not in threads):
threads.append(thread)
def store(time, event, cpu, thread, val, ena, run):
#print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
# (event, cpu, thread, time, val, ena, run)
store_key(time, cpu, thread)
key = get_key(time, event, cpu, thread)
data[key] = [ val, ena, run]
def get(time, event, cpu, thread):
key = get_key(time, event, cpu, thread)
return data[key][0]
def stat__cycles_k(cpu, thread, time, val, ena, run):
store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions_k(cpu, thread, time, val, ena, run):
store(time, "instructions", cpu, thread, val, ena, run);
def stat__cycles_u(cpu, thread, time, val, ena, run):
store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions_u(cpu, thread, time, val, ena, run):
store(time, "instructions", cpu, thread, val, ena, run);
def stat__cycles(cpu, thread, time, val, ena, run):
store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions(cpu, thread, time, val, ena, run):
store(time, "instructions", cpu, thread, val, ena, run);
def stat__interval(time):
for cpu in cpus:
for thread in threads:
cyc = get(time, "cycles", cpu, thread)
ins = get(time, "instructions", cpu, thread)
cpi = 0
if ins != 0:
cpi = cyc/float(ins)
print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
def trace_end():
pass
# XXX trace_end callback could be used as an alternative place
# to compute same values as in the script above:
#
# for time in times:
# for cpu in cpus:
# for thread in threads:
# cyc = get(time, "cycles", cpu, thread)
# ins = get(time, "instructions", cpu, thread)
#
# if ins != 0:
# cpi = cyc/float(ins)
#
# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
| gpl-2.0 |
toshywoshy/ansible | lib/ansible/modules/network/netscaler/netscaler_save_config.py | 21 | 4892 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_save_config
short_description: Save Netscaler configuration.
description:
- This module unconditionally saves the configuration on the target netscaler node.
- This module does not support check mode.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
nsip:
description:
- The ip address of the netscaler appliance where the nitro API calls will be made.
- "The port can be specified with the colon (:). E.g. C(192.168.1.1:555)."
required: True
nitro_user:
description:
- The username with which to authenticate to the netscaler node.
required: True
nitro_pass:
description:
- The password with which to authenticate to the netscaler node.
required: True
nitro_protocol:
choices: [ 'http', 'https' ]
default: http
description:
- Which protocol to use when accessing the nitro API objects.
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
nitro_timeout:
description:
- Time in seconds until a timeout error is thrown when establishing a new session with Netscaler.
default: 310
requirements:
- nitro python sdk
'''
EXAMPLES = '''
---
- name: Save netscaler configuration
delegate_to: localhost
netscaler_save_config:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
- name: Setup server without saving configuration
delegate_to: localhost
notify: Save configuration
netscaler_server:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
save_config: no
name: server-1
ipaddress: 192.168.1.1
# Under playbook's handlers
- name: Save configuration
delegate_to: localhost
netscaler_save_config:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import get_nitro_client, log, loglines, netscaler_common_arguments
def main():
argument_spec = copy.deepcopy(netscaler_common_arguments)
# Delete common arguments irrelevant to this module
del argument_spec['state']
del argument_spec['save_config']
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
try:
log('Saving configuration')
client.save_config()
except nitro_exception as e:
msg = "nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/google/protobuf/internal/service_reflection_test.py | 75 | 5170 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = '[email protected] (Petar Petrov)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(unittest.TestCase):
def testService(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request, response, callback):
self.method = method
self.controller = controller
self.request = request
callback(response)
class MockRpcController(service.RpcController):
def SetFailed(self, msg):
self.failure_message = msg
self.callback_response = None
class MyService(unittest_pb2.TestService):
pass
self.callback_response = None
def MyCallback(response):
self.callback_response = response
rpc_controller = MockRpcController()
channel = MockRpcChannel()
srvc = MyService()
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual('Method Foo not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
rpc_controller.failure_message = None
service_descriptor = unittest_pb2.TestService.GetDescriptor()
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual('Method Bar not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
class MyServiceImpl(unittest_pb2.TestService):
def Foo(self, rpc_controller, request, done):
self.foo_called = True
def Bar(self, rpc_controller, request, done):
self.bar_called = True
srvc = MyServiceImpl()
rpc_controller.failure_message = None
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.foo_called)
rpc_controller.failure_message = None
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.bar_called)
def testServiceStub(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request,
response_class, callback):
self.method = method
self.controller = controller
self.request = request
callback(response_class())
self.callback_response = None
def MyCallback(response):
self.callback_response = response
channel = MockRpcChannel()
stub = unittest_pb2.TestService_Stub(channel)
rpc_controller = 'controller'
request = 'request'
# GetDescriptor now static, still works as instance method for compatibility
self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
stub.GetDescriptor())
# Invoke method.
stub.Foo(rpc_controller, request, MyCallback)
self.assertIsInstance(self.callback_response, unittest_pb2.FooResponse)
self.assertEqual(request, channel.request)
self.assertEqual(rpc_controller, channel.controller)
self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
if __name__ == '__main__':
unittest.main()
| mit |
TheWardoctor/Wardoctors-repo | plugin.video.metalliq/resources/lib/meta/gui/dialogs.py | 1 | 9044 | import time
from threading import Thread, RLock
from xbmcswift2 import xbmc, xbmcgui, xbmcaddon
from meta import plugin
from settings import *
from language import get_string as _
def wait_for_dialog(dialog_id, timeout=None, interval=500):
start = time.time()
while not xbmc.getCondVisibility("Window.IsActive(%s)" % dialog_id):
if xbmc.abortRequested or (timeout and time.time() - start >= timeout):
return False
xbmc.sleep(interval)
return True
def ok(title, msg):
xbmcgui.Dialog().ok(title, msg)
def yesno(title, msg, no=_("No"), yes=_("Yes")):
return xbmcgui.Dialog().yesno(title, msg, nolabel=no, yeslabel=yes)
def select(title, items):
return xbmcgui.Dialog().select(title, items)
def multiselect(title, items):
return xbmcgui.Dialog().multiselect(title, items)
def notify(msg, title, delay, image, sound=False):
return xbmcgui.Dialog().notification(heading=title, message=msg, time=delay, icon=image, sound=False)
def select_ext(title, populator, tasks_count):
addonPath = xbmcaddon.Addon().getAddonInfo('path').decode('utf-8')
dlg = SelectorDialog("DialogSelect.xml", addonPath, title = title, populator = populator, steps=tasks_count)
with ExtendedDialogHacks():
dlg.doModal()
selection = dlg.get_selection()
del dlg
return selection
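# Illustrative call (a sketch; the populator contract assumed here is that it yields
# (source_label, [link, ...]) tuples where each link is a dict with 'label' and 'path'):
#
#   def populator():
#       yield ("Example add-on", [{"label": "Item 1",
#                                  "path": "plugin://plugin.video.example/play/1"}])
#
#   choice = select_ext("Choose a stream", populator, tasks_count=1)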
class FanArtWindow(xbmcgui.WindowDialog):
def __init__(self):
control_background = xbmcgui.ControlImage(0, 0, 1280, 720, plugin.addon.getAddonInfo('fanart'))
self.addControl(control_background)
fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
if fanart and fanart != "Fanart_Image":
control_fanart = xbmcgui.ControlImage(0, 0, 1280, 720, fanart)
self.addControl(control_fanart)
class ExtendedDialogHacks(object):
def __init__(self):
self.active = False
self.hide_progress = False
self.hide_info = False
self.autohidedialogs = plugin.get_setting(SETTING_AUTO_HIDE_DIALOGS, bool)
if self.autohidedialogs:
self.hide_progress = plugin.get_setting(SETTING_AUTO_HIDE_DIALOGS_PROGRESS, bool)
self.hide_info = plugin.get_setting(SETTING_AUTO_HIDE_DIALOGS_INFO, bool)
if not self.hide_progress and not self.hide_info:
self.autohidedialogs = False
def __enter__(self):
self.active = True
# self.numeric_keyboard = None
self.fanart_window = FanArtWindow()
## Keyboard hack
# if plugin.get_setting(SETTING_ADVANCED_KEYBOARD_HACKS, bool):
# self.numeric_keyboard = xbmcgui.Window(10109)
# Thread(target = lambda: self.numeric_keyboard.show()).start()
# wait_for_dialog('numericinput', interval=50)
# Show fanart background
self.fanart_window.show()
# Run background task
if self.autohidedialogs:
Thread(target = self.background_task).start()
def background_task(self):
xbmc.sleep(1000)
while not xbmc.abortRequested and self.active:
if self.hide_progress:
active_window = xbmcgui.getCurrentWindowDialogId()
if active_window in [10101,10151]:
xbmc.executebuiltin("Dialog.Close(%d, true)" % active_window)
if self.hide_info:
if xbmc.getCondVisibility("Window.IsActive(infodialog)"):
xbmc.executebuiltin('Dialog.Close(infodialog, true)')
xbmc.sleep(100)
def __exit__(self, exc_type, exc_value, traceback):
self.active = False
# if self.numeric_keyboard is not None:
# self.numeric_keyboard.close()
# del self.numeric_keyboard
# xbmc.executebuiltin("Dialog.Close(numericinput, true)")
self.fanart_window.close()
del self.fanart_window
class SelectorDialog(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.title = kwargs['title']
self.populator = kwargs['populator']
self.steps = kwargs['steps']
self.items = []
self.selection = None
self.insideIndex = -1
self.completed_steps = 0
self.thread = None
self.lock = RLock()
def get_selection(self):
""" get final selection """
return self.selection
def onInit(self):
# set title
self.label = self.getControl(1)
self.label.setLabel(self.title)
# Hide ok button
self.getControl(5).setVisible(False)
# Get active list
try:
self.list = self.getControl(6)
self.list.controlLeft(self.list)
self.list.controlRight(self.list)
self.getControl(3).setVisible(False)
except:
print_exc()
self.list = self.getControl(3)
self.setFocus(self.list)
# populate list
self.thread = Thread(target = self._populate)
self.thread.start()
def onAction(self, action):
if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
if self.insideIndex == -1:
self.close()
else:
self._inside_root(select=self.insideIndex)
def onClick(self, controlID):
if controlID == 6 or controlID == 3:
num = self.list.getSelectedPosition()
if num >= 0:
if self.insideIndex == -1:
self._inside(num)
else:
self.selection = self.items[self.insideIndex][1][num]
self.close()
def onFocus(self, controlID):
if controlID in (3,61):
self.setFocus(self.list)
def _inside_root(self, select=-1):
with self.lock:
self.list.reset()
for source, links in self.items:
if len(links) > 1:
source += " > {0}".format(_("Found %i items") % len(links))
listitem = xbmcgui.ListItem(source)
try:
if "plugin://" in links[0]['path']: icon = xbmcaddon.Addon(id=links[0]['path'].split("/")[2]).getAddonInfo('icon')
else: icon = xbmc.translatePath("{0}/folder.jpg".format(links[0]['path'].rsplit("/",2)[0]))
listitem.setIconImage(icon)
listitem.setThumbnailImage(icon)
except: pass
self.list.addItem(listitem)
if select >= 0:
self.list.selectItem(select)
self.insideIndex = -1
def _inside(self, num):
if num == -1:
self._inside_root(select=self.insideIndex)
return
with self.lock:
source, links = self.items[num]
if len(links) == 1:
self.selection = links[0]
self.close()
return
self.list.reset()
for item in links:
listitem = xbmcgui.ListItem(item['label'])
listitem.setProperty("Path", item['path'])
try:
if "plugin://" in links[0]['path']: icon = xbmcaddon.Addon(id=links[0]['path'].split("/")[2]).getAddonInfo('icon')
else: icon = xbmc.translatePath("{0}/folder.jpg".format(links[0]['path'].rsplit("/",2)[0]))
listitem.setIconImage(icon)
listitem.setThumbnailImage(icon)
except: pass
self.list.addItem(listitem)
self.insideIndex = num
def step(self):
self.completed_steps += 1
progress = self.completed_steps * 100 / self.steps
self.label.setLabel(u"{0} - {1:d}% ({2}/{3})".format(self.title, progress, self.completed_steps, self.steps))
def _populate(self):
xbmc.sleep(500) # Delay population to let ui settle
self.label.setLabel(self.title)
for result in self.populator():
self.step()
if not result:
continue
with self.lock:
# Remember selected item
selectedItem = None
if self.insideIndex == -1:
selectedIndex = self.list.getSelectedPosition()
else:
selectedIndex = self.insideIndex
if selectedIndex >= 0:
selectedItem = self.items[selectedIndex]
# Add new item
self.items.append(result)
self.items.sort()
                # Retrieve the new selection index
if selectedItem is not None:
selectedIndex = self.items.index(selectedItem)
if self.insideIndex != -1:
self.insideIndex = selectedIndex
# Update only if in root
if self.insideIndex == -1:
self._inside_root(select=selectedIndex)
self.setFocus(self.list)
pass | apache-2.0 |
vefimova/rally | rally/plugins/openstack/context/cleanup/resources.py | 2 | 15782 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception as boto_exception
from neutronclient.common import exceptions as neutron_exceptions
from saharaclient.api import base as saharaclient_base
from rally.common import log as logging
from rally.plugins.openstack.context.cleanup import base
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.plugins.openstack.wrappers import keystone as keystone_wrapper
LOG = logging.getLogger(__name__)
def get_order(start):
return iter(range(start, start + 99))
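# Each service section below draws consecutive order numbers from one of
# these iterators (e.g. _nova_order = get_order(200); each next(_nova_order)
# call then yields 200, 201, ...), which is how the cleanup order of the
# resource classes declared below is assigned.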
class SynchronizedDeletion(object):
def is_deleted(self):
return True
class QuotaMixin(SynchronizedDeletion):
def id(self):
return self.raw_resource
def delete(self):
self._manager().delete(self.raw_resource)
def list(self):
return [self.tenant_uuid] if self.tenant_uuid else []
# HEAT
@base.resource("heat", "stacks", order=100, tenant_resource=True)
class HeatStack(base.ResourceManager):
pass
# NOVA
_nova_order = get_order(200)
@base.resource("nova", "servers", order=next(_nova_order))
class NovaServer(base.ResourceManager):
def delete(self):
if getattr(self.raw_resource, "OS-EXT-STS:locked", False):
self.raw_resource.unlock()
super(NovaServer, self).delete()
@base.resource("nova", "keypairs", order=next(_nova_order))
class NovaKeypair(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("nova", "security_groups", order=next(_nova_order))
class NovaSecurityGroup(SynchronizedDeletion, base.ResourceManager):
def list(self):
return filter(lambda x: x.name != "default",
super(NovaSecurityGroup, self).list())
@base.resource("nova", "quotas", order=next(_nova_order),
admin_required=True, tenant_resource=True)
class NovaQuotas(QuotaMixin, base.ResourceManager):
pass
@base.resource("nova", "floating_ips_bulk", order=next(_nova_order),
admin_required=True)
class NovaFloatingIpsBulk(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.address
def list(self):
return [floating_ip for floating_ip in self._manager().list()
if floating_ip.pool.startswith("rally_fip_pool_")]
@base.resource("nova", "networks", order=next(_nova_order),
admin_required=True)
class NovaNetworks(SynchronizedDeletion, base.ResourceManager):
def list(self):
return [net for net in self._manager().list()
if net.label.startswith("rally_novanet")]
# EC2
_ec2_order = get_order(250)
class EC2Mixin(object):
def _manager(self):
return getattr(self.user, self._service)()
@base.resource("ec2", "servers", order=next(_ec2_order))
class EC2Server(EC2Mixin, base.ResourceManager):
def is_deleted(self):
try:
instances = self._manager().get_only_instances(
instance_ids=[self.id()])
except boto_exception.EC2ResponseError as e:
# NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound'
#                if the instance is not found. In this case we consider
#                the instance to have already been deleted.
return getattr(e, "error_code") == "InvalidInstanceID.NotFound"
# NOTE(wtakase): After deletion an instance can remain in the
#                'terminated' state for a while. If all returned instances
#                are 'terminated' this returns True. If
#                get_only_instances() returns an empty list this also
#                returns True, because we consider the instance to have
#                already been deleted.
return all(map(lambda i: i.state == "terminated", instances))
def delete(self):
self._manager().terminate_instances(instance_ids=[self.id()])
def list(self):
return self._manager().get_only_instances()
# NEUTRON
_neutron_order = get_order(300)
@base.resource(service=None, resource=None, admin_required=True)
class NeutronMixin(SynchronizedDeletion, base.ResourceManager):
# Neutron has the best client ever, so we need to override everything
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def id(self):
return self.raw_resource["id"]
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
delete_method(self.id())
def list(self):
resources = self._resource + "s"
list_method = getattr(self._manager(), "list_%s" % resources)
return filter(lambda r: r["tenant_id"] == self.tenant_uuid,
list_method({"tenant_id": self.tenant_uuid})[resources])
@base.resource("neutron", "port", order=next(_neutron_order),
tenant_resource=True)
class NeutronPort(NeutronMixin):
def delete(self):
if (self.raw_resource["device_owner"] == "network:router_interface" or
self.raw_resource["device_owner"] ==
"network:router_interface_distributed"):
self._manager().remove_interface_router(
self.raw_resource["device_id"],
{"port_id": self.raw_resource["id"]})
else:
try:
self._manager().delete_port(self.id())
except neutron_exceptions.PortNotFoundClient:
# The port may already have been auto-deleted; skip silently
LOG.debug("Port %s was not deleted. Skip silently because "
"port can be already auto-deleted."
% self.id())
@base.resource("neutron", "router", order=next(_neutron_order),
tenant_resource=True)
class NeutronRouter(NeutronMixin):
pass
@base.resource("neutron", "pool", order=next(_neutron_order),
tenant_resource=True)
class NeutronV1Pool(NeutronMixin):
pass
@base.resource("neutron", "subnet", order=next(_neutron_order),
tenant_resource=True)
class NeutronSubnet(NeutronMixin):
pass
@base.resource("neutron", "network", order=next(_neutron_order),
tenant_resource=True)
class NeutronNetwork(NeutronMixin):
pass
@base.resource("neutron", "quota", order=next(_neutron_order),
admin_required=True, tenant_resource=True)
class NeutronQuota(QuotaMixin, NeutronMixin):
def delete(self):
self._manager().delete_quota(self.tenant_uuid)
# CINDER
_cinder_order = get_order(400)
@base.resource("cinder", "backups", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeBackup(base.ResourceManager):
pass
@base.resource("cinder", "volume_snapshots", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeSnapshot(base.ResourceManager):
pass
@base.resource("cinder", "transfers", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeTransfer(base.ResourceManager):
pass
@base.resource("cinder", "volumes", order=next(_cinder_order),
tenant_resource=True)
class CinderVolume(base.ResourceManager):
pass
@base.resource("cinder", "quotas", order=next(_cinder_order),
admin_required=True, tenant_resource=True)
class CinderQuotas(QuotaMixin, base.ResourceManager):
pass
# MANILA
_manila_order = get_order(450)
@base.resource("manila", "shares", order=next(_manila_order),
tenant_resource=True)
class ManilaShare(base.ResourceManager):
pass
@base.resource("manila", "share_networks", order=next(_manila_order),
tenant_resource=True)
class ManilaShareNetwork(base.ResourceManager):
pass
@base.resource("manila", "security_services", order=next(_manila_order),
tenant_resource=True)
class ManilaSecurityService(base.ResourceManager):
pass
# GLANCE
@base.resource("glance", "images", order=500, tenant_resource=True)
class GlanceImage(base.ResourceManager):
def list(self):
return self._manager().list(owner=self.tenant_uuid)
# SAHARA
_sahara_order = get_order(600)
@base.resource("sahara", "job_executions", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "jobs", order=next(_sahara_order),
tenant_resource=True)
class SaharaJob(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "job_binary_internals", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "job_binaries", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "data_sources", order=next(_sahara_order),
tenant_resource=True)
class SaharaDataSource(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "clusters", order=next(_sahara_order),
tenant_resource=True)
class SaharaCluster(base.ResourceManager):
# Need special treatment for Sahara Cluster because of the way the
# exceptions are described in:
# https://github.com/openstack/python-saharaclient/blob/master/
# saharaclient/api/base.py#L145
def is_deleted(self):
try:
self._manager().get(self.id())
return False
except saharaclient_base.APIException as e:
return e.error_code == 404
@base.resource("sahara", "cluster_templates", order=next(_sahara_order),
tenant_resource=True)
class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "node_group_templates", order=next(_sahara_order),
tenant_resource=True)
class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):
pass
# CEILOMETER
@base.resource("ceilometer", "alarms", order=700, tenant_resource=True)
class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.alarm_id
def list(self):
query = [{
"field": "project_id",
"op": "eq",
"value": self.tenant_uuid
}]
return self._manager().list(q=query)
# ZAQAR
@base.resource("zaqar", "queues", order=800)
class ZaqarQueues(SynchronizedDeletion, base.ResourceManager):
def list(self):
return self.user.zaqar().queues()
# DESIGNATE
_designate_order = get_order(900)
@base.resource("designate", "domains", order=next(_designate_order))
class Designate(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("designate", "servers", order=next(_designate_order),
admin_required=True, perform_for_admin_only=True)
class DesignateServer(SynchronizedDeletion, base.ResourceManager):
pass
# SWIFT
_swift_order = get_order(1000)
class SwiftMixin(SynchronizedDeletion, base.ResourceManager):
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def id(self):
return self.raw_resource
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
# NOTE(weiwu): *self.raw_resource is required because to delete a
# container we pass only the container name, whereas to delete an
# object we pass the container name first and the object name second.
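# For example (illustrative values only): raw_resource == ["logs"] results
# in delete_container("logs"), while raw_resource == ["logs", "app.log"]
# results in delete_object("logs", "app.log").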
delete_method(*self.raw_resource)
@base.resource("swift", "object", order=next(_swift_order),
tenant_resource=True)
class SwiftObject(SwiftMixin):
def list(self):
object_list = []
containers = self._manager().get_account(full_listing=True)[1]
for con in containers:
objects = self._manager().get_container(con["name"],
full_listing=True)[1]
for obj in objects:
raw_resource = [con["name"], obj["name"]]
object_list.append(raw_resource)
return object_list
@base.resource("swift", "container", order=next(_swift_order),
tenant_resource=True)
class SwiftContainer(SwiftMixin):
def list(self):
containers = self._manager().get_account(full_listing=True)[1]
return [[con["name"]] for con in containers]
# MISTRAL
@base.resource("mistral", "workbooks", order=1100, tenant_resource=True)
class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):
def delete(self):
self._manager().delete(self.raw_resource.name)
# MURANO
_murano_order = get_order(1200)
@base.resource("murano", "environments", tenant_resource=True,
order=next(_murano_order))
class MuranoEnvironments(base.ResourceManager):
pass
@base.resource("murano", "packages", tenant_resource=True,
order=next(_murano_order))
class MuranoPackages(base.ResourceManager):
def list(self):
return filter(lambda x: x.name != "Core library",
super(MuranoPackages, self).list())
# IRONIC
_ironic_order = get_order(1300)
@base.resource("ironic", "node", admin_required=True,
order=next(_ironic_order), perform_for_admin_only=True)
class IronicNodes(base.ResourceManager):
def id(self):
return self.raw_resource.uuid
# KEYSTONE
_keystone_order = get_order(9000)
class KeystoneMixin(SynchronizedDeletion):
def _manager(self):
return keystone_wrapper.wrap(getattr(self.admin, self._service)())
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
delete_method(self.id())
def list(self):
# TODO(boris-42): We should use such stuff in all list commands.
resources = self._resource + "s"
list_method = getattr(self._manager(), "list_%s" % resources)
return filter(kutils.is_temporary, list_method())
@base.resource("keystone", "user", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneUser(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "project", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneProject(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "service", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneService(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "role", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneRole(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "ec2", tenant_resource=True,
order=next(_keystone_order))
class KeystoneEc2(SynchronizedDeletion, base.ResourceManager):
def list(self):
return self._manager().list(self.raw_resource)
| apache-2.0 |
mhbu50/erpnext | erpnext/stock/doctype/material_request/material_request.py | 2 | 19170 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import cstr, flt, getdate, new_line_sep, nowdate, add_days, get_link_to_form
from frappe import msgprint, _
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_indented_qty
from erpnext.controllers.buying_controller import BuyingController
from erpnext.manufacturing.doctype.work_order.work_order import get_item_details
from erpnext.buying.utils import check_on_hold_or_closed_status, validate_for_items
from erpnext.stock.doctype.item.item import get_item_defaults
from six import string_types
form_grid_templates = {
"items": "templates/form_grid/material_request_grid.html"
}
class MaterialRequest(BuyingController):
def get_feed(self):
return _("{0}: {1}").format(self.status, self.material_request_type)
def check_if_already_pulled(self):
pass
def validate_qty_against_so(self):
so_items = {} # Format --> {'SO/00001': {'Item/001': 120, 'Item/002': 24}}
for d in self.get('items'):
if d.sales_order:
if not d.sales_order in so_items:
so_items[d.sales_order] = {d.item_code: flt(d.qty)}
else:
if not d.item_code in so_items[d.sales_order]:
so_items[d.sales_order][d.item_code] = flt(d.qty)
else:
so_items[d.sales_order][d.item_code] += flt(d.qty)
for so_no in so_items.keys():
for item in so_items[so_no].keys():
already_indented = frappe.db.sql("""select sum(qty)
from `tabMaterial Request Item`
where item_code = %s and sales_order = %s and
docstatus = 1 and parent != %s""", (item, so_no, self.name))
already_indented = already_indented and flt(already_indented[0][0]) or 0
actual_so_qty = frappe.db.sql("""select sum(stock_qty) from `tabSales Order Item`
where parent = %s and item_code = %s and docstatus = 1""", (so_no, item))
actual_so_qty = actual_so_qty and flt(actual_so_qty[0][0]) or 0
if actual_so_qty and (flt(so_items[so_no][item]) + already_indented > actual_so_qty):
frappe.throw(_("Material Request of maximum {0} can be made for Item {1} against Sales Order {2}").format(actual_so_qty - already_indented, item, so_no))
# Validate
# ---------------------
def validate(self):
super(MaterialRequest, self).validate()
self.validate_schedule_date()
self.check_for_on_hold_or_closed_status('Sales Order', 'sales_order')
self.validate_uom_is_integer("uom", "qty")
if not self.status:
self.status = "Draft"
from erpnext.controllers.status_updater import validate_status
validate_status(self.status,
["Draft", "Submitted", "Stopped", "Cancelled", "Pending",
"Partially Ordered", "Ordered", "Issued", "Transferred", "Received"])
validate_for_items(self)
self.set_title()
# self.validate_qty_against_so()
# NOTE: Since Item BOM and FG quantities are combined, using current data, it cannot be validated
# Though the creation of Material Request from a Production Plan can be rethought to fix this
def set_title(self):
'''Set title as comma separated list of items'''
if not self.title:
items = ', '.join([d.item_name for d in self.items][:3])
self.title = _('{0} Request for {1}').format(self.material_request_type, items)[:100]
def on_submit(self):
# frappe.db.set(self, 'status', 'Submitted')
self.update_requested_qty()
self.update_requested_qty_in_production_plan()
if self.material_request_type == 'Purchase':
self.validate_budget()
def before_save(self):
self.set_status(update=True)
def before_submit(self):
self.set_status(update=True)
def before_cancel(self):
# if MRQ is already closed, no point saving the document
check_on_hold_or_closed_status(self.doctype, self.name)
self.set_status(update=True, status='Cancelled')
def check_modified_date(self):
mod_db = frappe.db.sql("""select modified from `tabMaterial Request` where name = %s""",
self.name)
date_diff = frappe.db.sql("""select TIMEDIFF('%s', '%s')"""
% (mod_db[0][0], cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(_(self.doctype), self.name))
def update_status(self, status):
self.check_modified_date()
self.status_can_change(status)
self.set_status(update=True, status=status)
self.update_requested_qty()
def status_can_change(self, status):
"""
validates that `status` is acceptable for the present controller status
and throws an Exception otherwise.
"""
if self.status and self.status == 'Cancelled':
# cancelled documents cannot change
if status != self.status:
frappe.throw(
_("{0} {1} is cancelled so the action cannot be completed").
format(_(self.doctype), self.name),
frappe.InvalidStatusError
)
elif self.status and self.status == 'Draft':
# draft document to pending only
if status != 'Pending':
frappe.throw(
_("{0} {1} has not been submitted so the action cannot be completed").
format(_(self.doctype), self.name),
frappe.InvalidStatusError
)
def on_cancel(self):
self.update_requested_qty()
self.update_requested_qty_in_production_plan()
def update_completed_qty(self, mr_items=None, update_modified=True):
if self.material_request_type == "Purchase":
return
if not mr_items:
mr_items = [d.name for d in self.get("items")]
for d in self.get("items"):
if d.name in mr_items:
if self.material_request_type in ("Material Issue", "Material Transfer", "Customer Provided"):
d.ordered_qty = flt(frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail` where material_request = %s
and material_request_item = %s and docstatus = 1""",
(self.name, d.name))[0][0])
if d.ordered_qty and d.ordered_qty > d.stock_qty:
frappe.throw(_("The total Issue / Transfer quantity {0} in Material Request {1} \
cannot be greater than requested quantity {2} for Item {3}").format(d.ordered_qty, d.parent, d.qty, d.item_code))
elif self.material_request_type == "Manufacture":
d.ordered_qty = flt(frappe.db.sql("""select sum(qty)
from `tabWork Order` where material_request = %s
and material_request_item = %s and docstatus = 1""",
(self.name, d.name))[0][0])
frappe.db.set_value(d.doctype, d.name, "ordered_qty", d.ordered_qty)
self._update_percent_field({
"target_dt": "Material Request Item",
"target_parent_dt": self.doctype,
"target_parent_field": "per_ordered",
"target_ref_field": "stock_qty",
"target_field": "ordered_qty",
"name": self.name,
}, update_modified)
def update_requested_qty(self, mr_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
item_wh_list = []
for d in self.get("items"):
if (not mr_item_rows or d.name in mr_item_rows) and [d.item_code, d.warehouse] not in item_wh_list \
and d.warehouse and frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1 :
item_wh_list.append([d.item_code, d.warehouse])
for item_code, warehouse in item_wh_list:
update_bin_qty(item_code, warehouse, {
"indented_qty": get_indented_qty(item_code, warehouse)
})
def update_requested_qty_in_production_plan(self):
production_plans = []
for d in self.get('items'):
if d.production_plan and d.material_request_plan_item:
qty = d.qty if self.docstatus == 1 else 0
frappe.db.set_value('Material Request Plan Item',
d.material_request_plan_item, 'requested_qty', qty)
if d.production_plan not in production_plans:
production_plans.append(d.production_plan)
for production_plan in production_plans:
doc = frappe.get_doc('Production Plan', production_plan)
doc.set_status()
doc.db_set('status', doc.status)
def update_completed_and_requested_qty(stock_entry, method):
if stock_entry.doctype == "Stock Entry":
material_request_map = {}
for d in stock_entry.get("items"):
if d.material_request:
material_request_map.setdefault(d.material_request, []).append(d.material_request_item)
for mr, mr_item_rows in material_request_map.items():
if mr and mr_item_rows:
mr_obj = frappe.get_doc("Material Request", mr)
if mr_obj.status in ["Stopped", "Cancelled"]:
frappe.throw(_("{0} {1} is cancelled or stopped").format(_("Material Request"), mr),
frappe.InvalidStatusError)
mr_obj.update_completed_qty(mr_item_rows)
mr_obj.update_requested_qty(mr_item_rows)
def set_missing_values(source, target_doc):
if target_doc.doctype == "Purchase Order" and getdate(target_doc.schedule_date) < getdate(nowdate()):
target_doc.schedule_date = None
target_doc.run_method("set_missing_values")
target_doc.run_method("calculate_taxes_and_totals")
def update_item(obj, target, source_parent):
target.conversion_factor = obj.conversion_factor
target.qty = flt(flt(obj.stock_qty) - flt(obj.ordered_qty))/ target.conversion_factor
target.stock_qty = (target.qty * target.conversion_factor)
if getdate(target.schedule_date) < getdate(nowdate()):
target.schedule_date = None
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Material Request'),
})
return list_context
@frappe.whitelist()
def update_status(name, status):
material_request = frappe.get_doc('Material Request', name)
material_request.check_permission('write')
material_request.update_status(status)
@frappe.whitelist()
def make_purchase_order(source_name, target_doc=None):
def postprocess(source, target_doc):
if frappe.flags.args and frappe.flags.args.default_supplier:
# items only for given default supplier
supplier_items = []
for d in target_doc.items:
default_supplier = get_item_defaults(d.item_code, target_doc.company).get('default_supplier')
if frappe.flags.args.default_supplier == default_supplier:
supplier_items.append(d)
target_doc.items = supplier_items
set_missing_values(source, target_doc)
def select_item(d):
return d.ordered_qty < d.stock_qty
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Purchase Order",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"]
}
},
"Material Request Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "material_request_item"],
["parent", "material_request"],
["uom", "stock_uom"],
["uom", "uom"],
["sales_order", "sales_order"],
["sales_order_item", "sales_order_item"]
],
"postprocess": update_item,
"condition": select_item
}
}, target_doc, postprocess)
return doclist
@frappe.whitelist()
def make_request_for_quotation(source_name, target_doc=None):
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Request for Quotation",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"]
}
},
"Material Request Item": {
"doctype": "Request for Quotation Item",
"field_map": [
["name", "material_request_item"],
["parent", "material_request"],
["uom", "uom"]
]
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_purchase_order_based_on_supplier(source_name, target_doc=None, args=None):
mr = source_name
supplier_items = get_items_based_on_default_supplier(args.get("supplier"))
def postprocess(source, target_doc):
target_doc.supplier = args.get("supplier")
if getdate(target_doc.schedule_date) < getdate(nowdate()):
target_doc.schedule_date = None
target_doc.set("items", [d for d in target_doc.get("items")
if d.get("item_code") in supplier_items and d.get("qty") > 0])
set_missing_values(source, target_doc)
target_doc = get_mapped_doc("Material Request", mr, {
"Material Request": {
"doctype": "Purchase Order",
},
"Material Request Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "material_request_item"],
["parent", "material_request"],
["uom", "stock_uom"],
["uom", "uom"]
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty
}
}, target_doc, postprocess)
return target_doc
@frappe.whitelist()
def get_items_based_on_default_supplier(supplier):
supplier_items = [d.parent for d in frappe.db.get_all("Item Default",
{"default_supplier": supplier, "parenttype": "Item"}, 'parent')]
return supplier_items
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_material_requests_based_on_supplier(doctype, txt, searchfield, start, page_len, filters):
conditions = ""
if txt:
conditions += "and mr.name like '%%"+txt+"%%' "
if filters.get("transaction_date"):
date = filters.get("transaction_date")[1]
conditions += "and mr.transaction_date between '{0}' and '{1}' ".format(date[0], date[1])
supplier = filters.get("supplier")
supplier_items = get_items_based_on_default_supplier(supplier)
if not supplier_items:
frappe.throw(_("{0} is not the default supplier for any items.").format(supplier))
material_requests = frappe.db.sql("""select distinct mr.name, transaction_date,company
from `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
where mr.name = mr_item.parent
and mr_item.item_code in ({0})
and mr.material_request_type = 'Purchase'
and mr.per_ordered < 99.99
and mr.docstatus = 1
and mr.status != 'Stopped'
and mr.company = '{1}'
{2}
order by mr_item.item_code ASC
limit {3} offset {4} """ \
.format(', '.join(['%s']*len(supplier_items)), filters.get("company"), conditions, page_len, start),
tuple(supplier_items), as_dict=1)
return material_requests
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_default_supplier_query(doctype, txt, searchfield, start, page_len, filters):
doc = frappe.get_doc("Material Request", filters.get("doc"))
item_list = []
for d in doc.items:
item_list.append(d.item_code)
return frappe.db.sql("""select default_supplier
from `tabItem Default`
where parent in ({0}) and
default_supplier IS NOT NULL
""".format(', '.join(['%s']*len(item_list))),tuple(item_list))
@frappe.whitelist()
def make_supplier_quotation(source_name, target_doc=None):
def postprocess(source, target_doc):
set_missing_values(source, target_doc)
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"]
}
},
"Material Request Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "material_request_item",
"parent": "material_request",
"sales_order": "sales_order"
}
}
}, target_doc, postprocess)
return doclist
@frappe.whitelist()
def make_stock_entry(source_name, target_doc=None):
def update_item(obj, target, source_parent):
qty = flt(flt(obj.stock_qty) - flt(obj.ordered_qty))/ target.conversion_factor \
if flt(obj.stock_qty) > flt(obj.ordered_qty) else 0
target.qty = qty
target.transfer_qty = qty * obj.conversion_factor
target.conversion_factor = obj.conversion_factor
if source_parent.material_request_type == "Material Transfer" or source_parent.material_request_type == "Customer Provided":
target.t_warehouse = obj.warehouse
else:
target.s_warehouse = obj.warehouse
if source_parent.material_request_type == "Customer Provided":
target.allow_zero_valuation_rate = 1
if source_parent.material_request_type == "Material Transfer":
target.s_warehouse = obj.from_warehouse
def set_missing_values(source, target):
target.purpose = source.material_request_type
if source.job_card:
target.purpose = 'Material Transfer for Manufacture'
if source.material_request_type == "Customer Provided":
target.purpose = "Material Receipt"
target.run_method("calculate_rate_and_amount")
target.set_stock_entry_type()
target.set_job_card_data()
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["in", ["Material Transfer", "Material Issue", "Customer Provided"]]
}
},
"Material Request Item": {
"doctype": "Stock Entry Detail",
"field_map": {
"name": "material_request_item",
"parent": "material_request",
"uom": "stock_uom"
},
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.stock_qty
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def raise_work_orders(material_request):
mr= frappe.get_doc("Material Request", material_request)
errors =[]
work_orders = []
default_wip_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_wip_warehouse")
for d in mr.items:
if (d.stock_qty - d.ordered_qty) > 0:
if frappe.db.exists("BOM", {"item": d.item_code, "is_default": 1}):
wo_order = frappe.new_doc("Work Order")
wo_order.update({
"production_item": d.item_code,
"qty": d.stock_qty - d.ordered_qty,
"fg_warehouse": d.warehouse,
"wip_warehouse": default_wip_warehouse,
"description": d.description,
"stock_uom": d.stock_uom,
"expected_delivery_date": d.schedule_date,
"sales_order": d.sales_order,
"bom_no": get_item_details(d.item_code).bom_no,
"material_request": mr.name,
"material_request_item": d.name,
"planned_start_date": mr.transaction_date,
"company": mr.company
})
wo_order.set_work_order_operations()
wo_order.save()
work_orders.append(wo_order.name)
else:
errors.append(_("Row {0}: Bill of Materials not found for the Item {1}")
.format(d.idx, get_link_to_form("Item", d.item_code)))
if work_orders:
work_orders_list = [get_link_to_form("Work Order", d) for d in work_orders]
if len(work_orders) > 1:
msgprint(_("The following {0} were created: {1}")
.format(frappe.bold(_("Work Orders")), '<br>' + ', '.join(work_orders_list)))
else:
msgprint(_("The {0} {1} was created successfully")
.format(frappe.bold(_("Work Order")), work_orders_list[0]))
if errors:
frappe.throw(_("Work Order cannot be created for following reason: <br> {0}")
.format(new_line_sep(errors)))
return work_orders
@frappe.whitelist()
def create_pick_list(source_name, target_doc=None):
doc = get_mapped_doc('Material Request', source_name, {
'Material Request': {
'doctype': 'Pick List',
'field_map': {
'material_request_type': 'purpose'
},
'validation': {
'docstatus': ['=', 1]
}
},
'Material Request Item': {
'doctype': 'Pick List Item',
'field_map': {
'name': 'material_request_item',
'qty': 'stock_qty'
},
},
}, target_doc)
doc.set_item_locations()
return doc
| gpl-3.0 |
hanvo/MusicCloud | Crawler/Install Files/pygame/msysio.py | 2 | 1422 | # module msysio.py
# Requires Python 2.2 or better.
"""Provide helpful routines for interactive IO on the MSYS console"""
# Output needs to be flushed to be seen. It is especially important
# when prompting for user input.
import sys
import os
__all__ = ['raw_input_', 'print_', 'is_msys']
# 2.x/3.x compatibility stuff
try:
raw_input
except NameError:
raw_input = input
# Exported functions
def raw_input_(prompt=None):
"""Prompt for user input in an MSYS console friendly way"""
if prompt is None:
prompt = ''
print_(prompt, end='')
return raw_input()
def print_(*args, **kwds):
"""Print arguments in an MSYS console friendly way
Keyword arguments:
file, sep, end
"""
stream = kwds.get('file', sys.stdout)
sep = kwds.get('sep', ' ')
end = kwds.get('end', '\n')
if args:
stream.write(sep.join([str(arg) for arg in args]))
if end:
stream.write(end)
try:
stream.flush()
except AttributeError:
pass
def is_msys():
"""Return true if the execution environment is MSYS"""
try:
# Unfortunately there is no longer an MSYS specific identifier.
return os.environ['TERM'] == 'cygwin'
except KeyError:
return False
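# Minimal usage sketch (not part of the original module; the prompt text and
# greeting below are only examples):
if __name__ == '__main__':
    print_('Running under MSYS:', is_msys())
    name = raw_input_('Your name: ')
    print_('Hello, {0}!'.format(name))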
| bsd-3-clause |
jskew/gnuradio | gr-analog/python/analog/qa_quadrature_demod.py | 47 | 1844 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import cmath
from gnuradio import gr, gr_unittest, analog, blocks
class test_quadrature_demod(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_quad_demod_001(self):
f = 1000.0
fs = 8000.0
src_data = []
for i in xrange(200):
ti = i/fs
src_data.append(cmath.exp(2j*cmath.pi*f*ti))
# f/fs is a quarter turn per sample.
# Set the gain based on this to get 1 out.
gain = 1.0/(cmath.pi/4)
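# Illustrative arithmetic: the phase advances by 2*pi*f/fs = pi/4 rad per
# sample, and quadrature_demod_cf outputs gain * (phase change per sample),
# so gain = 1/(pi/4) = 4/pi ~= 1.273 scales the steady-state output to 1.0.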
expected_result = [0,] + 199*[1.0]
src = blocks.vector_source_c(src_data)
op = analog.quadrature_demod_cf(gain)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_quadrature_demod, "test_quadrature_demod.xml")
| gpl-3.0 |
kyoren/https-github.com-h2oai-h2o-3 | py2/testdir_rapids/test_rapids_builtin.py | 21 | 7544 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_browse as h2b, h2o_exec as h2e, h2o_import as h2i
initList = [
'(#(1))', # why isn't this illegal?
'(#1)',
'((#1))',
'(((#1)))',
'(= !x #1)',
'((= !x #1))',
'(((= !x #1)))',
# complains
# '(= !x (#1 #2))',
# '((= !x (#1 #2)))',
# '(((= !x (#1 #2))))',
# okay. not okay if comma separated. seems wrong
'(= !x (+ #1 #2))',
'((= !x (+ #1 #2)))',
'(((= !x (+ #1 #2))))',
# complains
# '(= !x (+ #1 #2 #4))',
# '((= !x (+ #1 #2 #4)))',
# '(((= !x (+ #1 #2 #4))))',
# okay.
'(= !x + #1 #2)',
'((= !x + #1 #2))',
'(((= !x + #1 #2)))',
# '(= x + #1 #2)', # fails
# parens on binary operators
'(= !x + #1 + #1 (+ #1 #1))',
'= !x + #1 + #1 (+ #1 #1)',
'= !x N #1 N #1 (N #1 #1)',
'= !x n #1 n #1 (n #1 #1)',
'= !x L #1 L #1 (L #1 #1)',
'= !x l #1 l #1 (l #1 #1)',
'= !x G #1 G #1 (G #1 #1)',
'= !x g #1 g #1 (g #1 #1)',
'= !x (* (* #1 #1) (* #1 #1))',
'= !x * #1 * #1 (* #1 #1)',
'= !x - #1 - #1 (- #1 #1)',
'= !x ^ #1 ^ #1 (^ #1 #1)',
'= !x / #1 / #1 (/ #1 #1)',
'= !x ** #1 ** #1 (** #1 #1)',
'= !x % #1 % #1 (% #1 #1)',
# '= !x %/% #1 %/% #1 %/% #1 #1', # unimplemented
# '= !x %% #1 %% #1 %% #1 #1', # unimplemented
# '(= !x + _#1 + _#1 + _#1 _#1)', # unimplemented
'= !x _ + #1 + #1 (+ #1 _ #1)',
'= !x _ N #1 N #1 (N #1 _ #1)',
'= !x _ n #1 n #1 (n #1 _ #1)',
'= !x _ L #1 L #1 (L #1 _ #1)',
'= !x _ l #1 l #1 (l #1 _ #1)',
'= !x _ G #1 G #1 (G #1 _ #1)',
'= !x _ g #1 g #1 (g #1 _ #1)',
'= !x _ * #1 * #1 (* #1 _ #1)',
'= !x _ - #1 - #1 (- #1 _ #1)',
'= !x _ ^ #1 ^ #1 (^ #1 _ #1)',
'= !x _ / #1 / #1 (/ #1 _ #1)',
'= !x _ ** #1 ** #1 (** #1 _ #1)',
'= !x _ % #1 % #1 (% #1 _ #1)',
# can have space between ( and function
'= !x1 ( sum ([ %r1 "null" #0) %TRUE)',
'= !x2 ( sum ([ %r1 "null" #0) %TRUE)',
'= !x2a ( sum ([ %r1 "null" #0) %TRUE )',
# can have space after (
'= !x3 ( sum ([ %r1 "null" #0) %TRUE )',
'= !x3a ( sum ([ %r1 "null" #0) %TRUE )',
'= !x3b ( sum ([ %r1 "null" #0 ) %TRUE )',
'= !x4 ( sum ([ %r1 " null " #0 ) %TRUE )',
# can have space after (
'(= !x3 ( sum ([ %r1 "null" #0) %TRUE ))',
'(= !x3a ( sum ([ %r1 "null" #0) %TRUE ) )',
'(= !x3b ( sum ([ %r1 "null" #0 ) %TRUE ) )',
'((= !x4 ( sum ([ %r1 " null " #0 ) %TRUE )))',
'(= !x3 ( max ([ %r1 "null" #0) %TRUE ))',
'(= !x3a ( max ([ %r1 "null" #0) %TRUE ) )',
'(= !x3b ( max ([ %r1 "null" #0 ) %TRUE ) )',
'((= !x4 ( max ([ %r1 " null " #0 ) %TRUE )))',
'(= !x3 ( min ([ %r1 "null" #0) %TRUE ))',
'(= !x3a ( min ([ %r1 "null" #0) %TRUE ) )',
'(= !x3b ( min ([ %r1 "null" #0 ) %TRUE ) )',
'((= !x4 ( min ([ %r1 " null " #0 ) %TRUE )))',
'(= !v (c {#1;#4567;(: #91234 #9000209);(: #9000210 #45001045);45001085})',
'(= !x3 ( min ([ %r1 "null" #0) %TRUE ))',
'(= !x3 (+ (sum ([ %r1 "null" #0) %TRUE) (sum ([ %r1 "null" #0) %TRUE) )',
'(= !x3 (+ (xorsum ([ %r1 "null" #0) %TRUE) (xorsum ([ %r1 "null" #0) %TRUE) )',
'(= !x3 (+ (max ([ %r1 "null" #0) %TRUE) (max ([ %r1 "null" #0) %TRUE) )',
'(= !x3 (+ (min ([ %r1 "null" #0) %TRUE) (min ([ %r1 "null" #0) %TRUE) )',
# '{ #1 #1 }',
# '(= !x4 { #1 #1 })',
# r1[c(1,5,8,10,33),]
# commas are illegal (var name?)
# vectors can be strings or numbers only, not vars or keys
# h2o objects can't be in a vector
# should work soon
# '(= !x (c {#1;#5;#8;#10;#33}))',
# '(= !x (c {(: #0 #5) }))',
# '(= !x (c {(: #5 #5) }))',
# '(= !x (c {(: #5 #0) }))',
# space after : should be optional
# this doesn't work
# '(= !v (c { #1;#4567;(: #91234 #9000209);(: #9000210 #45001045);45001085 })',
# c(1,2,3,4)
# '= !x (sum %r1 )'
# '(= !x (xorsum ([ %r1 "null" #0) %TRUE))', # works
# 'cave=c(1.3,0,1,2,3,4,5)',
# 'ma=c(2.3,0,1,2,3,4,5)',
# 'r2.hex=c(3.3,0,1,2,3,4,5)',
# 'r3.hex=c(4.3,0,1,2,3,4,5)',
# 'r4.hex=c(5.3,0,1,2,3,4,5)',
# 'r.hex=i.hex',
]
# single operand stuff
exprList = [
'(= !x (cos ([ %r1 "null" #0) ))',
'(= !x (sin ([ %r1 "null" #0) ))',
'(= !x (tan ([ %r1 "null" #0) ))',
'(= !x (acos ([ %r1 "null" #0) ))',
'(= !x (asin ([ %r1 "null" #0) ))',
'(= !x (atan ([ %r1 "null" #0) ))',
'(= !x (cosh ([ %r1 "null" #0) ))',
'(= !x (sinh ([ %r1 "null" #0) ))',
'(= !x (tanh ([ %r1 "null" #0) ))',
'(= !x (abs ([ %r1 "null" #0) ))',
'(= !x (sign ([ %r1 "null" #0) ))',
'(= !x (sqrt ([ %r1 "null" #0) ))',
'(= !x (log ([ %r1 "null" #0) ))',
'(= !x (exp ([ %r1 "null" #0) ))',
'(= !x (is.na ([ %r1 "null" #0) ))',
# FIX! these don't work in h2o-dev?
# '(= !x (ceil ([ %r1 "null" #0) ))',
# '(= !x (floor ([ %r1 "null" #0) ))',
'(= !x (length ([ %r1 "null" #0) ))',
# '(= !x (scale ([ %r1 "null" #0) ))',
# '(= !x (table ([ %r1 "null" #0) ))',
# '(= !x (unique ([ %r1 "null" #0) ))',
# '(= !x (factor ([ %r1 "null" #0) ))',
# '(= !x (nrow ([ %r1 "null" #0) ))',
# '(= !x (sd ([ %r1 "null" #0) ))',
# '(= !x (ncol ([ %r1 "null" #0) ))',
'(= !x (is.factor ([ %r1 "null" #0) ))',
'(= !x (any.factor ([ %r1 "null" #0) ))',
'(= !x (any.na ([ %r1 "null" #0) ))',
# '(= !x (isTrue ([ %r1 "null" #0) ))',
# '(= !x (head ([ %r1 "null" #0) ))',
# '(= !x (tail ([ %r1 "null" #0) ))',
# 1 operand
# '(= !x (seq_len #0.1))',
# 2 operands
'(= !x (round ([ %r1 "null" #0) #1))',
# '(= !x (trunc ([ %r1 "null" #0) #1))',
# '(= !x (signif ([ %r1 "null" #0) #1))',
# FIX! gets AIOOBE
# '(= !x (cut ([ %r1 "null" #0) #2))',
# '(= !x (rep_len #0.1 #10))',
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rapids_builtin(self):
bucket = 'smalldata'
csvPathname = 'iris/iris_wheader.csv'
hexKey = 'r1'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hexKey)
bad = []
for execExpr in exprList:
try:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=4)
except:
# assert 1==0
bad.append(execExpr)
print "\nbad:"
for b in bad:
print b
# for execExpr in exprList:
# h2e.exec_expr(execExpr=execExpr, resultKey=None, timeoutSecs=10)
# h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
mschwerin/linux-3.0.35-qmx6 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
rancher/cattle | tests/integration/cattletest/core/test_user_preferences.py | 7 | 3604 | from common_fixtures import * # NOQA
from gdapi import ApiError
@pytest.fixture(scope='module')
def user_client(context):
return context.user_client
def _user_preference(client, name=None):
if name is None:
name = random_str()
preference = client.wait_success(client.create_user_preference(
name=name, value=random_str()))
got_preference = client.by_id('userPreference', preference.id)
assert preference.id == got_preference.id
assert name == got_preference.name
assert preference.value == got_preference.value
return got_preference
def make_prefs(client):
pref_ids = []
for x in range(0, 5):
pref_ids.append(
_user_preference(client, name=random_str()).id)
return set(pref_ids)
def get_prefs_ids(client, all=False):
pref_ids = []
for pref in client.list_user_preference(all=all):
pref_ids.append(pref.id)
return set(pref_ids)
def test_create_user_preference(user_client):
_user_preference(user_client)
def test_delete_user_preference(user_client):
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.deactivate())
preference = user_client.wait_success(preference.remove())
preference = user_client.wait_success(preference.purge())
preference = user_client.by_id('userPreference', preference.id)
assert preference.state == 'purged'
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.remove())
assert preference.state == 'removed'
preference = user_client.wait_success(preference.purge())
assert preference.state == 'purged'
def test_update_user_preference(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_update_user_preference_pass_name(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, name=preference.name, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_unique_user_preference(user_client, new_context):
rand_str = random_str()
_user_preference(user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(user_client, name=rand_str)
assert e.value.error.status == 422
_user_preference(new_context.user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(new_context.user_client, name=rand_str)
assert e.value.error.status == 422
def test_all_filter_user_preference(admin_user_client, request):
ctx1 = new_context(admin_user_client, request)
ctx2 = new_context(admin_user_client, request)
ctx1_prefs = make_prefs(ctx1.user_client)
ctx2_prefs = make_prefs(ctx2.user_client)
got_ctx1_prefs = get_prefs_ids(ctx1.user_client)
got_ctx2_prefs = get_prefs_ids(ctx2.user_client)
assert len(ctx1_prefs & got_ctx1_prefs) == len(ctx1_prefs)
assert len(ctx2_prefs & got_ctx2_prefs) == len(ctx2_prefs)
assert len(got_ctx1_prefs & got_ctx2_prefs) == 0
admin_prefs = get_prefs_ids(admin_user_client)
all_prefs = get_prefs_ids(admin_user_client, all=True)
assert len(admin_prefs) != len(all_prefs)
assert admin_prefs <= all_prefs
assert ctx1_prefs | ctx2_prefs <= all_prefs
assert len((ctx1_prefs | ctx2_prefs) & admin_prefs) == 0
| apache-2.0 |
Eric-Zhong/odoo | addons/sale_order_dates/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order_dates
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
KAsante95/osf.io | website/identifiers/model.py | 58 | 1631 | # -*- coding: utf-8 -*-
from bson import ObjectId
from modularodm import Q
from modularodm import fields
from modularodm.storage.base import KeyExistsException
from framework.mongo import StoredObject
from framework.mongo.utils import unique_on
@unique_on(['referent.0', 'referent.1', 'category'])
class Identifier(StoredObject):
"""A persistent identifier model for DOIs, ARKs, and the like."""
_id = fields.StringField(default=lambda: str(ObjectId()))
# object to which the identifier points
referent = fields.AbstractForeignField(required=True)
# category: e.g. 'ark', 'doi'
category = fields.StringField(required=True)
# value: e.g. 'FK424601'
value = fields.StringField(required=True)
class IdentifierMixin(object):
"""Model mixin that adds methods for getting and setting Identifier objects
for model objects.
"""
def get_identifier(self, category):
identifiers = Identifier.find(
Q('referent', 'eq', self) &
Q('category', 'eq', category)
)
return identifiers[0] if identifiers else None
def get_identifier_value(self, category):
identifier = self.get_identifier(category)
return identifier.value if identifier else None
def set_identifier_value(self, category, value):
try:
identifier = Identifier(referent=self, category=category, value=value)
identifier.save()
except KeyExistsException:
identifier = self.get_identifier(category)
assert identifier is not None
identifier.value = value
identifier.save()
| apache-2.0 |
ressu/SickGear | lib/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
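# Rough sketch of how these constants are consumed elsewhere in chardet
# (CharDistributionAnalysis, not defined in this module): each character is
# mapped through GB2312CharToFreqOrder, and confidence is roughly
#   freq_chars / ((total_chars - freq_chars) * GB2312_TYPICAL_DISTRIBUTION_RATIO)
# where freq_chars counts characters whose frequency order is below 512.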
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| gpl-3.0 |
b-me/django | tests/urlpatterns_reverse/middleware.py | 386 | 1050 | from django.core.urlresolvers import reverse
from django.http import HttpResponse, StreamingHttpResponse
from . import urlconf_inner
class ChangeURLconfMiddleware(object):
def process_request(self, request):
request.urlconf = urlconf_inner.__name__
class NullChangeURLconfMiddleware(object):
def process_request(self, request):
request.urlconf = None
class ReverseInnerInResponseMiddleware(object):
def process_response(self, *args, **kwargs):
return HttpResponse(reverse('inner'))
class ReverseOuterInResponseMiddleware(object):
def process_response(self, *args, **kwargs):
return HttpResponse(reverse('outer'))
class ReverseInnerInStreaming(object):
def process_view(self, *args, **kwargs):
def stream():
yield reverse('inner')
return StreamingHttpResponse(stream())
class ReverseOuterInStreaming(object):
def process_view(self, *args, **kwargs):
def stream():
yield reverse('outer')
return StreamingHttpResponse(stream())
| bsd-3-clause |
MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/pygments/lexers/esoteric.py | 23 | 7083 | # -*- coding: utf-8 -*-
"""
pygments.lexers.esoteric
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for esoteric languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
__all__ = ['BrainfuckLexer', 'BefungeLexer', 'BoogieLexer', 'RedcodeLexer', 'CAmkESLexer']
class BrainfuckLexer(RegexLexer):
"""
Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_
language.
"""
name = 'Brainfuck'
aliases = ['brainfuck', 'bf']
filenames = ['*.bf', '*.b']
mimetypes = ['application/x-brainfuck']
tokens = {
'common': [
# use different colors for different instruction types
(r'[.,]+', Name.Tag),
(r'[+-]+', Name.Builtin),
(r'[<>]+', Name.Variable),
(r'[^.,+\-<>\[\]]+', Comment),
],
'root': [
(r'\[', Keyword, 'loop'),
(r'\]', Error),
include('common'),
],
'loop': [
(r'\[', Keyword, '#push'),
(r'\]', Keyword, '#pop'),
include('common'),
]
}
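    # Illustrative use, assuming the usual pygments entry points:
    #   from pygments import highlight
    #   from pygments.formatters import TerminalFormatter
    #   highlight('++[>+<-].', BrainfuckLexer(), TerminalFormatter())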
class BefungeLexer(RegexLexer):
"""
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
.. versionadded:: 0.7
"""
name = 'Befunge'
aliases = ['befunge']
filenames = ['*.befunge']
mimetypes = ['application/x-befunge']
tokens = {
'root': [
(r'[0-9a-f]', Number),
(r'[+*/%!`-]', Operator), # Traditional math
(r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives
(r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives
(r'[|_mw]', Keyword),
(r'[{}]', Name.Tag), # Befunge-98 stack ops
(r'".*?"', String.Double), # Strings don't appear to allow escapes
(r'\'.', String.Single), # Single character
(r'[#;]', Comment), # Trampoline... depends on direction hit
(r'[pg&~=@iotsy]', Keyword), # Misc
(r'[()A-Z]', Comment), # Fingerprints
(r'\s+', Text), # Whitespace doesn't matter
],
}
class CAmkESLexer(RegexLexer):
"""
Basic lexer for the input language for the
`CAmkES <https://sel4.systems/CAmkES/>`_ component platform.
.. versionadded:: 2.1
"""
name = 'CAmkES'
aliases = ['camkes', 'idl4']
filenames = ['*.camkes', '*.idl4']
tokens = {
        'root': [
# C pre-processor directive
(r'^\s*#.*\n', Comment.Preproc),
# Whitespace, comments
(r'\s+', Text),
(r'/\*(.|\n)*?\*/', Comment),
(r'//.*\n', Comment),
(r'[\[\(\){},\.;=\]]', Punctuation),
(words(('assembly', 'attribute', 'component', 'composition',
'configuration', 'connection', 'connector', 'consumes',
'control', 'dataport', 'Dataport', 'emits', 'event',
'Event', 'from', 'group', 'hardware', 'has', 'interface',
'Interface', 'maybe', 'procedure', 'Procedure', 'provides',
'template', 'to', 'uses'), suffix=r'\b'), Keyword),
(words(('bool', 'boolean', 'Buf', 'char', 'character', 'double',
                'float', 'in', 'inout', 'int', 'int16_t', 'int32_t',
'int64_t', 'int8_t', 'integer', 'mutex', 'out', 'real',
'refin', 'semaphore', 'signed', 'string', 'uint16_t',
'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t', 'unsigned',
'void'), suffix=r'\b'), Keyword.Type),
# Recognised attributes
(r'[a-zA-Z_]\w*_(priority|domain|buffer)', Keyword.Reserved),
(words(('dma_pool', 'from_access', 'to_access'), suffix=r'\b'),
Keyword.Reserved),
# CAmkES-level include
(r'import\s+(<[^>]*>|"[^"]*");', Comment.Preproc),
# C-level include
(r'include\s+(<[^>]*>|"[^"]*");', Comment.Preproc),
# Literals
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'-?[\d]+', Number),
(r'-?[\d]+\.[\d]+', Number.Float),
(r'"[^"]*"', String),
# Identifiers
(r'[a-zA-Z_]\w*', Name),
],
}
class RedcodeLexer(RegexLexer):
"""
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <[email protected]>.
.. versionadded:: 0.8
"""
name = 'Redcode'
aliases = ['redcode']
filenames = ['*.cw']
opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD',
'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL',
'ORG', 'EQU', 'END')
modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I')
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
(r';.*$', Comment.Single),
# Lexemes:
# Identifiers
(r'\b(%s)\b' % '|'.join(opcodes), Name.Function),
(r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator),
(r'[A-Za-z_]\w+', Name),
# Operators
(r'[-+*/%]', Operator),
(r'[#$@<>]', Operator), # mode
(r'[.,]', Punctuation), # mode
# Numbers
(r'[-+]?\d+', Number.Integer),
],
}
class BoogieLexer(RegexLexer):
"""
For `Boogie <https://boogie.codeplex.com/>`_ source code.
.. versionadded:: 2.1
"""
name = 'Boogie'
aliases = ['boogie']
filenames = ['*.bpl']
tokens = {
'root': [
# Whitespace and Comments
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
'then', 'var', 'while'),
suffix=r'\b'), Keyword),
(words(('const',), suffix=r'\b'), Keyword.Reserved),
(words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
(r"([{}():;,.])", Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
| mit |
kevinlee12/oppia | core/storage/activity/gae_models_test.py | 2 | 2748 | # coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.activity.gae_models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.platform import models
from core.tests import test_utils
(base_models, activity_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.activity])
class ActivityListModelTest(test_utils.GenericTestBase):
"""Tests the ActivityListModel class."""
def test_get_deletion_policy(self):
self.assertEqual(
activity_models.ActivityReferencesModel.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_featured_activity_list_always_exists(self):
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertIsNotNone(featured_model_instance)
self.assertEqual(featured_model_instance.id, 'featured')
self.assertEqual(featured_model_instance.activity_references, [])
def test_retrieving_non_existent_list(self):
with self.assertRaisesRegexp(Exception, 'Invalid ActivityListModel'):
activity_models.ActivityReferencesModel.get_or_create(
'nonexistent_key')
def test_updating_featured_activity_list(self):
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertEqual(featured_model_instance.activity_references, [])
featured_model_instance.activity_references = [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '0',
}]
featured_model_instance.put()
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertEqual(featured_model_instance.id, 'featured')
self.assertEqual(
featured_model_instance.activity_references, [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '0',
}])
| apache-2.0 |
comodit/demos | ceph-cluster/downscale_osds.py | 1 | 1977 | #!/usr/bin/env python
import time, config
from optparse import OptionParser
from comodit_client.api import Client
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from helper import get_latest_id, get_short_hostname, create_host
def downscale_osds(count):
# Script
print "Down-scaling Ceph cluster (OSDs)"
start_time = time.time()
# Connect to the ComodIT API
client = Client(config.endpoint, config.username, config.password)
env = client.get_environment(config.organization, 'Cluster')
latest_id = get_latest_id('Object Store ', env)
if latest_id < 0:
raise Exception("No OSD found")
if latest_id - count + 1 <= 1:
raise Exception("Cannot down-scale to less than 2 OSDs")
osd_hosts = []
for i in xrange(latest_id - count + 1, latest_id + 1):
osd = env.get_host('Object Store ' + str(i))
print "Bringing Object Store %i out of cluster..." % i
osd.settings().create("status", "out")
osd_hosts.append(osd)
for h in osd_hosts:
h.wait_for_pending_changes()
print "Configure cluster..."
next_id = latest_id + 1
osds = env.get_setting("osds").value
for i in xrange(0, len(osds)):
osd = osds[i]
id = int(osd["id"])
if id >= latest_id - count + 1 and id < latest_id + 1:
del osds[i]
env.settings().update("osds", osds)
time.sleep(3)
print "Deleting OSD(s)..."
for h in osd_hosts:
h.get_instance().delete()
h.delete()
total_time = time.time() - start_time
print "Down-scaling time: " + str(total_time)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--count", dest = "count", type = "int", help = "the number of OSDs to remove", default = 1)
(options, args) = parser.parse_args()
try:
downscale_osds(options.count)
except PythonApiException as e:
print e
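# Example run (assuming config.py holds valid ComodIT endpoint credentials):
#   ./downscale_osds.py -c 2
# removes the two most recently added "Object Store N" hosts from the cluster.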
| mit |
darktears/chromium-crosswalk | third_party/cython/src/Cython/Build/Cythonize.py | 90 | 6864 | #!/usr/bin/env python
import os
import shutil
import tempfile
from distutils.core import setup
from Cython.Build.Dependencies import cythonize, extended_iglob
from Cython.Utils import is_package_dir
from Cython.Compiler import Options
try:
import multiprocessing
parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
except ImportError:
multiprocessing = None
parallel_compiles = 0
class _FakePool(object):
def map_async(self, func, args):
from itertools import imap
for _ in imap(func, args):
pass
def close(self): pass
def terminate(self): pass
def join(self): pass
def parse_directives(option, name, value, parser):
dest = option.dest
old_directives = dict(getattr(parser.values, dest,
Options.directive_defaults))
directives = Options.parse_directive_list(
value, relaxed_bool=True, current_settings=old_directives)
setattr(parser.values, dest, directives)
def parse_options(option, name, value, parser):
dest = option.dest
options = dict(getattr(parser.values, dest, {}))
for opt in value.split(','):
if '=' in opt:
n, v = opt.split('=', 1)
v = v.lower() not in ('false', 'f', '0', 'no')
else:
n, v = opt, True
options[n] = v
setattr(parser.values, dest, options)
def find_package_base(path):
base_dir, package_path = os.path.split(path)
while os.path.isfile(os.path.join(base_dir, '__init__.py')):
base_dir, parent = os.path.split(base_dir)
package_path = '%s/%s' % (parent, package_path)
return base_dir, package_path
def cython_compile(path_pattern, options):
pool = None
paths = map(os.path.abspath, extended_iglob(path_pattern))
try:
for path in paths:
if options.build_inplace:
base_dir = path
while not os.path.isdir(base_dir) or is_package_dir(base_dir):
base_dir = os.path.dirname(base_dir)
else:
base_dir = None
if os.path.isdir(path):
# recursively compiling a package
paths = [os.path.join(path, '**', '*.%s' % ext)
for ext in ('py', 'pyx')]
else:
# assume it's a file(-like thing)
paths = [path]
ext_modules = cythonize(
paths,
nthreads=options.parallel,
exclude_failures=options.keep_going,
exclude=options.excludes,
compiler_directives=options.directives,
force=options.force,
quiet=options.quiet,
**options.options)
if ext_modules and options.build:
if len(ext_modules) > 1 and options.parallel > 1:
if pool is None:
try:
pool = multiprocessing.Pool(options.parallel)
except OSError:
pool = _FakePool()
pool.map_async(run_distutils, [
(base_dir, [ext]) for ext in ext_modules])
else:
run_distutils((base_dir, ext_modules))
except:
if pool is not None:
pool.terminate()
raise
else:
if pool is not None:
pool.close()
pool.join()
def run_distutils(args):
base_dir, ext_modules = args
script_args = ['build_ext', '-i']
cwd = os.getcwd()
temp_dir = None
try:
if base_dir:
os.chdir(base_dir)
temp_dir = tempfile.mkdtemp(dir=base_dir)
script_args.extend(['--build-temp', temp_dir])
setup(
script_name='setup.py',
script_args=script_args,
ext_modules=ext_modules,
)
finally:
if base_dir:
os.chdir(cwd)
if temp_dir and os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
def parse_args(args):
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] [sources and packages]+')
parser.add_option('-X', '--directive', metavar='NAME=VALUE,...', dest='directives',
type=str, action='callback', callback=parse_directives, default={},
help='set a compiler directive')
parser.add_option('-s', '--option', metavar='NAME=VALUE', dest='options',
type=str, action='callback', callback=parse_options, default={},
help='set a cythonize option')
parser.add_option('-3', dest='python3_mode', action='store_true',
help='use Python 3 syntax mode by default')
parser.add_option('-x', '--exclude', metavar='PATTERN', dest='excludes',
action='append', default=[],
help='exclude certain file patterns from the compilation')
parser.add_option('-b', '--build', dest='build', action='store_true',
help='build extension modules using distutils')
parser.add_option('-i', '--inplace', dest='build_inplace', action='store_true',
help='build extension modules in place using distutils (implies -b)')
parser.add_option('-j', '--parallel', dest='parallel', metavar='N',
type=int, default=parallel_compiles,
help=('run builds in N parallel jobs (default: %d)' %
parallel_compiles or 1))
parser.add_option('-f', '--force', dest='force', action='store_true',
help='force recompilation')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
help='be less verbose during compilation')
parser.add_option('--lenient', dest='lenient', action='store_true',
help='increase Python compatibility by ignoring some compile time errors')
parser.add_option('-k', '--keep-going', dest='keep_going', action='store_true',
help='compile as much as possible, ignore compilation failures')
options, args = parser.parse_args(args)
if not args:
parser.error("no source files provided")
if options.build_inplace:
options.build = True
if multiprocessing is None:
options.parallel = 0
if options.python3_mode:
options.options['language_level'] = 3
return options, args
def main(args=None):
options, paths = parse_args(args)
if options.lenient:
# increase Python compatibility by ignoring compile time errors
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
for path in paths:
cython_compile(path, options)
if __name__ == '__main__':
main()
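# Example invocation (hypothetical paths), e.g. via the installed cythonize script:
#   cythonize -i -j 4 src/**/*.pyx
# compiles the matching .pyx files and builds the extensions in place with 4
# parallel jobs.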
| bsd-3-clause |
wolf29/games | py-proj-building-blocks/pygame-1.9.1release/test/run_tests__tests/incomplete/fake_2_test.py | 5 | 1238 | if __name__ == '__main__':
import sys
import os
pkg_dir = (os.path.split(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0])[0])
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils import unittest
else:
from test import test_utils
from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
def test_get_focused(self):
self.assert_(True)
def test_get_mods(self):
self.assert_(True)
def test_get_pressed(self):
self.assert_(test_utils.test_not_implemented())
def test_name(self):
self.assert_(True)
def test_set_mods(self):
self.assert_(test_utils.test_not_implemented())
def test_set_repeat(self):
self.assert_(True)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ContinuumIO/watchdog | tests/__init__.py | 7 | 1387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from . import shell
from sys import version_info
from functools import partial
__all__ = ['unittest', 'Queue', 'tmpdir', 'p']
if version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
try:
from Queue import Queue # Python 2
except ImportError:
from queue import Queue # Python 3
@pytest.fixture()
def tmpdir(request):
path = os.path.realpath(shell.mkdtemp())
def finalizer():
shell.rm(path, recursive=True)
request.addfinalizer(finalizer)
return path
@pytest.fixture()
def p(tmpdir, *args):
"""
Convenience function to join the temporary directory path
with the provided arguments.
"""
return partial(os.path.join, tmpdir)
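# Illustrative use inside a test that requests both fixtures:
#   p('a', 'b.txt') == os.path.join(tmpdir, 'a', 'b.txt')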
| apache-2.0 |
kennedyshead/home-assistant | homeassistant/components/wirelesstag/sensor.py | 5 | 3620 | """Sensor support for Wireless Sensor Tags platform."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN as WIRELESSTAG_DOMAIN, SIGNAL_TAG_UPDATE, WirelessTagBaseSensor
_LOGGER = logging.getLogger(__name__)
SENSOR_TEMPERATURE = "temperature"
SENSOR_HUMIDITY = "humidity"
SENSOR_MOISTURE = "moisture"
SENSOR_LIGHT = "light"
SENSOR_TYPES = [SENSOR_TEMPERATURE, SENSOR_HUMIDITY, SENSOR_MOISTURE, SENSOR_LIGHT]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
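# A configuration.yaml entry this schema would accept (illustrative):
#   sensor:
#     - platform: wirelesstag
#       monitored_conditions:
#         - temperature
#         - humidity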
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
platform = hass.data.get(WIRELESSTAG_DOMAIN)
sensors = []
tags = platform.tags
for tag in tags.values():
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
if sensor_type in tag.allowed_sensor_types:
sensors.append(
WirelessTagSensor(platform, tag, sensor_type, hass.config)
)
add_entities(sensors, True)
class WirelessTagSensor(WirelessTagBaseSensor, SensorEntity):
"""Representation of a Sensor."""
def __init__(self, api, tag, sensor_type, config):
"""Initialize a WirelessTag sensor."""
super().__init__(api, tag)
self._sensor_type = sensor_type
self._name = self._tag.name
# I want to see entity_id as:
# sensor.wirelesstag_bedroom_temperature
# and not as sensor.bedroom for temperature and
# sensor.bedroom_2 for humidity
self._entity_id = (
f"sensor.{WIRELESSTAG_DOMAIN}_{self.underscored_name}_{self._sensor_type}"
)
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TAG_UPDATE.format(self.tag_id, self.tag_manager_mac),
self._update_tag_info_callback,
)
)
@property
def entity_id(self):
"""Overridden version."""
return self._entity_id
@property
def underscored_name(self):
"""Provide name savvy to be used in entity_id name of self."""
return self.name.lower().replace(" ", "_")
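    # Illustrative example (hypothetical tag name): a tag called "Kitchen Tag"
    # becomes "kitchen_tag", so a temperature sensor ends up with the entity_id
    # sensor.wirelesstag_kitchen_tag_temperature.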
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_class(self):
"""Return the class of the sensor."""
return self._sensor_type
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor.unit
@property
def principal_value(self):
"""Return sensor current value."""
return self._sensor.value
@property
def _sensor(self):
"""Return tag sensor entity."""
return self._tag.sensor[self._sensor_type]
@callback
def _update_tag_info_callback(self, event):
"""Handle push notification sent by tag manager."""
_LOGGER.debug("Entity to update state: %s event data: %s", self, event.data)
new_value = self._sensor.value_from_update_event(event.data)
self._state = self.decorate_value(new_value)
self.async_write_ha_state()
| apache-2.0 |
hnakamur/django-rest-framework-json-api | example/tests/utils.py | 3 | 1101 | import json
from django.utils.encoding import force_bytes, force_text
def dump_json(data):
'''
Converts a Python object to a JSON formatted string.
'''
json_kwargs = {
'sort_keys': True,
'indent': 4,
'separators': (', ', ': ')
}
return force_bytes(json.dumps(data, **json_kwargs))
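# Illustrative sketch (not part of the original file): with the kwargs above,
# dump_json({'b': 1, 'a': 2}) yields bytes roughly equivalent to
#
#     {
#         "a": 2,
#         "b": 1
#     }
#
# i.e. sorted keys, 4-space indent, and ', ' / ': ' separators.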
def redump_json(data):
'''
The response.content is already a JSON formatted string BUT
we don't know anything about its formatting, in particular,
the indent and separators arguments. DRF has a complex method to
determine what values to use for each argument and unfortunately,
the methods aren't the same in all DRF versions.
    So what to do? LOAD the JSON formatted string (response.content)
as a Python object and DUMP it again and hence the name of this function.
This will guarantee that we're comparing two similarly formatted JSON
strings. Only the formatting similarity is guaranteed. As for the content,
that's what the tests are for!
'''
data = json.loads(force_text(data))
return dump_json(data)
| bsd-2-clause |
gitaarik/django | django/contrib/postgres/fields/array.py | 59 | 9933 | import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.db.models.lookups import Exact, In
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
# For performance, only add a from_db_value() method if the base field
# implements it.
if hasattr(self.base_field, 'from_db_value'):
self.from_db_value = self._from_db_value
super(ArrayField, self).__init__(**kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.remote_field:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
        if isinstance(value, (list, tuple)):
return [self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field,
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def _from_db_value(self, value, expression, connection, context):
if value is None:
return value
return [
self.base_field.from_db_value(item, expression, connection, context)
for item in value
]
def value_to_string(self, obj):
values = []
vals = self.value_from_object(obj)
base_field = self.base_field
for val in vals:
if val is None:
values.append(None)
else:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
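    # Illustrative sketch (not part of the original file): for a model field
    # declared as `tags = ArrayField(models.CharField(max_length=10))`, a filter
    # like `.filter(tags__0='python')` resolves here to an IndexTransform and
    # compiles to `tags[1]` (PostgreSQL arrays are 1-indexed), while `tags__0_1`
    # resolves to a SliceTransform compiling to `tags[1:1]`.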
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for index, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def run_validators(self, value):
super(ArrayField, self).run_validators(value)
for index, part in enumerate(value):
try:
self.base_field.run_validators(part)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super(ArrayContains, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super(ArrayContainedBy, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayExact(Exact):
def as_sql(self, qn, connection):
sql, params = super(ArrayExact, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super(ArrayOverlap, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
# Distinguish NULL and empty arrays
return (
'CASE WHEN %(lhs)s IS NULL THEN NULL ELSE '
'coalesce(array_length(%(lhs)s, 1), 0) END'
) % {'lhs': lhs}, params
@ArrayField.register_lookup
class ArrayInLookup(In):
def get_prep_lookup(self):
values = super(ArrayInLookup, self).get_prep_lookup()
# In.process_rhs() expects values to be hashable, so convert lists
# to tuples.
return [tuple(value) for value in values]
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory(object):
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
| bsd-3-clause |
Yannig/ansible | lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py | 27 | 5751 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Enum options - ssl_certificate_finished, ssl_certificate_pending.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
'''
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
ca_certs=dict(type='list',),
certificate=dict(type='dict', required=True),
certificate_management_profile_ref=dict(type='str',),
created_by=dict(type='str',),
dynamic_params=dict(type='list',),
enckey_base64=dict(type='str',),
enckey_name=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
key=dict(type='str', no_log=True,),
key_params=dict(type='dict',),
name=dict(type='str', required=True),
status=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslkeyandcertificate',
set(['key']))
if __name__ == '__main__':
main()
| gpl-3.0 |
makfire/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/imageDashboard.py | 1 | 13026 | ''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, render
from django.template import RequestContext
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
from gnowsys_ndf.ndf.models import File
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import META_TYPE, GAPPS, MEDIA_ROOT
from gnowsys_ndf.ndf.models import node_collection
from gnowsys_ndf.ndf.views.methods import get_node_common_fields,create_grelation_list,get_execution_time
from gnowsys_ndf.ndf.views.methods import get_node_metadata, node_thread_access, create_thread_for_node
from gnowsys_ndf.ndf.management.commands.data_entry import create_gattribute
from gnowsys_ndf.ndf.views.methods import get_node_metadata, get_node_common_fields, create_gattribute, get_page, get_execution_time,set_all_urls,get_group_name_id
gapp_mt = node_collection.one({'_type': "MetaType", 'name': META_TYPE[0]})
GST_IMAGE = node_collection.one({'member_of': gapp_mt._id, 'name': GAPPS[3]})
@get_execution_time
def imageDashboard(request, group_id, image_id=None):
'''
    Fetch images according to the group name.
'''
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
if image_id is None:
image_ins = node_collection.find_one({'_type': "GSystemType", "name": "Image"})
if image_ins:
image_id = str(image_ins._id)
img_col = node_collection.find({'_type': 'File', 'member_of': {'$all': [ObjectId(image_id)]}, 'group_set': {'$all': [ObjectId(group_id)]}})
template = "ndf/ImageDashboard.html"
already_uploaded=request.GET.getlist('var',"")
variable = RequestContext(request, {'imageCollection': img_col,'already_uploaded':already_uploaded,'groupid':group_id,'group_id':group_id })
return render_to_response(template, variable)
@get_execution_time
def getImageThumbnail(request, group_id, _id):
'''
    This function can be called to get the thumbnail of an image through a URL.
'''
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group","name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
img_obj = node_collection.one({"_type": u"File", "_id": ObjectId(_id)})
if img_obj is not None:
# getting latest uploaded pic's _id
img_fs = img_obj.fs_file_ids[2]
if (img_obj.fs.files.exists(img_fs)):
f = img_obj.fs.files.get(ObjectId(img_fs))
return HttpResponse(f.read(),content_type=f.content_type)
else:
return HttpResponse("")
@get_execution_time
def getFullImage(request, group_id, _id, file_name = ""):
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
img_obj = node_collection.one({"_id": ObjectId(_id)})
if img_obj is not None:
if (img_obj.fs.files.exists(img_obj.fs_file_ids[0])):
f = img_obj.fs.files.get(ObjectId(img_obj.fs_file_ids[0]))
return HttpResponse(f.read(), content_type=f.content_type)
else:
return HttpResponse("")
else:
return HttpResponse("")
@get_execution_time
def get_mid_size_img(request, group_id, _id):
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group","name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
img_obj = node_collection.one({"_id": ObjectId(_id)})
try:
f = img_obj.fs.files.get(ObjectId(img_obj.fs_file_ids[2]))
return HttpResponse(f.read(), content_type=f.content_type)
except IndexError:
f = img_obj.fs.files.get(ObjectId(img_obj.fs_file_ids[0]))
return HttpResponse(f.read(), content_type=f.content_type)
@get_execution_time
def image_search(request,group_id):
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group","name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
imgcol = node_collection.find({"_type": "File", 'mime_type': {'$regex': 'image'}})
if request.method=="GET":
keyword=request.GET.get("search","")
img_search=node_collection.find({'$and':[{'mime_type':{'$regex': 'image'}},{'$or':[{'name':{'$regex':keyword}},{'tags':{'$regex':keyword}}]}]})
template="ndf/file_search.html"
variable=RequestContext(request,{'file_collection':img_search,'view_name':'image_search','groupid':group_id,'group_id':group_id})
return render_to_response(template,variable)
@get_execution_time
def image_detail(request, group_id, _id):
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group","name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
img_node = node_collection.one({"_id": ObjectId(_id)})
# First get the navigation list till topic from theme map
nav_l=request.GET.get('nav_li','')
breadcrumbs_list = []
nav_li = ""
if nav_l:
nav_li = nav_l
if img_node._type == "GSystemType":
return imageDashboard(request, group_id, _id)
img_node.get_neighbourhood(img_node.member_of)
thread_node = None
allow_to_comment = None
thread_node, allow_to_comment = node_thread_access(group_id, img_node)
imageCollection = node_collection.find({'member_of': {'$all': [ObjectId(GST_IMAGE._id)]},
'_type': 'File','fs_file_ids': {'$ne': []},
'group_set': {'$all': [ObjectId(group_id)]},
'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [
{'access_policy': u"PRIVATE"},
{'created_by': request.user.id}
]
}
]
}).sort("last_update", -1)
return render_to_response("ndf/image_detail.html",
{ 'node': img_node,
'group_id': group_id, 'nav_list':nav_li,
'node_has_thread': thread_node,
'allow_to_comment':allow_to_comment,
'groupid':group_id, 'imageCollection': imageCollection
},
context_instance = RequestContext(request)
)
@get_execution_time
def image_edit(request,group_id,_id):
# ins_objectid = ObjectId()
# if ins_objectid.is_valid(group_id) is False :
# group_ins = node_collection.find_one({'_type': "Group","name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if group_ins:
# group_id = str(group_ins._id)
# else :
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth :
# group_id = str(auth._id)
# else :
# pass
try:
group_id = ObjectId(group_id)
except:
group_name, group_id = get_group_name_id(group_id)
group_obj = node_collection.one({'_id': ObjectId(group_id)})
img_node = node_collection.one({"_id": ObjectId(_id)})
ce_id = request.GET.get('course_event_id')
res = request.GET.get('res')
title = GST_IMAGE.name
if request.method == "POST":
# get_node_common_fields(request, img_node, group_id, GST_IMAGE)
img_node.save(is_changed=get_node_common_fields(request, img_node, group_id, GST_IMAGE),groupid=group_id)
thread_create_val = request.POST.get("thread_create",'')
discussion_enable_at = node_collection.one({"_type": "AttributeType", "name": "discussion_enable"})
if thread_create_val == "Yes":
create_gattribute(img_node._id, discussion_enable_at, True)
return_status = create_thread_for_node(request,group_id, img_node)
else:
create_gattribute(img_node._id, discussion_enable_at, False)
if "CourseEventGroup" not in group_obj.member_of_names_list:
get_node_metadata(request,img_node)
teaches_list = request.POST.get('teaches_list','') # get the teaches list
if teaches_list !='':
teaches_list=teaches_list.split(",")
create_grelation_list(img_node._id,"teaches",teaches_list)
assesses_list = request.POST.get('assesses_list','')
if assesses_list !='':
assesses_list=assesses_list.split(",")
create_grelation_list(img_node._id,"assesses",assesses_list)
return HttpResponseRedirect(reverse('image_detail', kwargs={'group_id': group_id, '_id': img_node._id}))
else:
url = "/"+ str(group_id) +"/?selected="+str(img_node._id)+"#view_page"
return HttpResponseRedirect(url)
else:
img_node.get_neighbourhood(img_node.member_of)
return render_to_response("ndf/image_edit.html",
{'node': img_node, 'title': title,
'group_id': group_id,
'groupid': group_id,
'ce_id':ce_id,
'res': res
},
context_instance=RequestContext(request)
)
| agpl-3.0 |
UManPychron/pychron | pychron/pipeline/save_figure.py | 2 | 3467 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from six.moves import range
from traits.api import HasTraits, Button, Instance
from traitsui.api import View, Item, UItem, VGroup, InstanceEditor, Tabbed
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pdf.options import PDFLayoutView
from pychron.core.pdf.save_pdf_dialog import FigurePDFOptions
from pychron.core.save_model import SaveModel, SaveController
from pychron.core.ui.combobox_editor import ComboboxEditor
from pychron.paths import paths
class SaveFigureModel(SaveModel):
pdf_options = Instance(FigurePDFOptions)
def __init__(self, analyses, *args, **kw):
self.repository_identifiers = tuple({ai.repository_identifier for ai in analyses})
self.root_directory = self.repository_identifiers[0]
identifiers = tuple({ai.identifier for ai in analyses})
self.name = '_'.join(identifiers)
m = FigurePDFOptions()
m.load()
self.pdf_options = m
super(SaveFigureModel, self).__init__(*args, **kw)
def dump(self):
self.pdf_options.dump()
class SaveFigureView(SaveController):
def _get_root_item(self):
item = Item('root_directory', label='Directory',
editor=ComboboxEditor(name='repository_identifiers'))
return item
def traits_view(self):
path_group = self._get_path_group()
options_group = VGroup(UItem('pdf_options',
style='custom',
editor=InstanceEditor(view=PDFLayoutView)),
label='Layout')
v = okcancel_view(Tabbed(path_group, options_group),
title='Save PDF Dialog',
width=700)
return v
if __name__ == '__main__':
import random
paths.build('_dev')
class A(object):
def __init__(self):
self.repository_identifier = random.choice(['Foo', 'Bar', 'Bat'])
self.identifier = '1000'
ans = [A() for i in range(5)]
sfm = SaveFigureModel(ans)
sfv = SaveFigureView(model=sfm)
class Demo(HasTraits):
test = Button
def traits_view(self):
return View('test')
def _test_fired(self):
sfv.edit_traits()
# sfv.configure_traits()
print('fff', sfm.prepare_path())
Demo().configure_traits()
# ============= EOF =============================================
| apache-2.0 |
mmauroy/SickRage | tornado/test/websocket_test.py | 19 | 14504 | from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
from tornado.util import u
try:
import tornado.websocket
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
def initialize(self, close_future, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(message, isinstance(message, bytes))
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1/0
class HeaderHandler(TestWebSocketHandler):
def open(self):
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
self.set_status(503)
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, compression_options=None):
ws = yield websocket_connect(
'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
compression_options=compression_options)
raise gen.Return(ws)
@gen.coroutine
def close(self, ws):
"""Close a websocket connection and wait for the server side.
If we don't wait here, there are sometimes leak warnings in the
tests.
"""
ws.close()
yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
('/header', HeaderHandler, dict(close_future=self.close_future)),
('/close_reason', CloseReasonHandler,
dict(close_future=self.close_future)),
('/error_in_on_message', ErrorInOnMessageHandler,
dict(close_future=self.close_future)),
('/async_prepare', AsyncPrepareHandler,
dict(close_future=self.close_future)),
])
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect('/echo')
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
def test_websocket_callbacks(self):
websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port(),
io_loop=self.io_loop, callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(b'hello \xe9', binary=True)
response = yield ws.read_message()
self.assertEqual(response, b'hello \xe9')
yield self.close(ws)
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(u('hello \u00e9'))
response = yield ws.read_message()
self.assertEqual(response, u('hello \u00e9'))
yield self.close(ws)
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect('/error_in_on_message')
ws.write_message('hello')
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
yield self.close(ws)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect('/notfound')
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect('/non_ws')
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://127.0.0.1:%d/' % port,
io_loop=self.io_loop,
connect_timeout=3600)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
# Close the underlying stream.
ws.stream.close()
yield self.close_future
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
headers={'X-Test': 'hello'}))
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
yield self.close_future
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect('/echo')
ws.close(1001, 'goodbye')
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, 'goodbye')
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect('/async_prepare')
ws.write_message('hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': '127.0.0.1:%d' % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {'Origin': 'http://somewhereelse.com'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = 'ws://localhost:%d/echo' % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {'Origin': 'http://subtenant.localhost'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
class CompressionTestMixin(object):
MESSAGE = 'Hello world. Testing 123 123'
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(
close_future=self.close_future,
compression_options=self.get_server_compression_options())),
])
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
'/echo',
compression_options=self.get_client_compression_options())
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in,
ws.protocol._wire_bytes_out)
yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
# Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
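    # Illustrative note (not part of the original file): the constants reflect the
    # frame layout for short messages - a 2-byte header per frame, plus a 4-byte
    # masking key on client-to-server frames only, hence len + 6 out and len + 2 in.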
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
        # Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b'abcd', b''), b'')
self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(self.mask(b'\x00\x01\x02\x03',
b'\xff\xfb\xfd\xfc\xfe\xfa'),
b'\xff\xfa\xff\xff\xfe\xfb')
self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
b'\x00\x01\x02\x03\x04\x05'),
b'\xff\xfa\xff\xff\xfb\xfe')
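    # Illustrative note (not part of the original file): websocket masking XORs
    # each payload byte with the repeating 4-byte mask, i.e.
    # out[i] = data[i] ^ mask[i % 4]. For example mask(b'abcd', b'b') is
    # bytes([0x62 ^ 0x61]) == b'\x03', matching the second assertion above.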
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
| gpl-3.0 |
grlee77/scipy | scipy/cluster/tests/test_vq.py | 1 | 12393 |
import warnings
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal, assert_,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
ClusterError, _krandinit)
from scipy.cluster import _vq
from scipy.sparse.sputils import matrix
TESTDATA_2D = np.array([
-2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
-2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
-4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
-0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
-2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
-2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
-2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
-2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
-1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
-1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
-0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
-1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
-2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
-0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
-2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
-2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
-1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
-3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
-1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
-2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
-0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
-2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
2.11]).reshape((200, 2))
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestWhiten:
def test_whiten(self):
desired = np.array([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
for tp in np.array, matrix:
obs = tp([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_allclose(whiten(obs), desired, rtol=1e-5)
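    # Illustrative note (not part of the original file): whiten() rescales each
    # feature (column) to unit standard deviation, i.e. it is equivalent to
    # obs / obs.std(axis=0), which is where the `desired` values above come from.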
def test_whiten_zero_std(self):
desired = np.array([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
for tp in np.array, matrix:
obs = tp([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_allclose(whiten(obs), desired, rtol=1e-5)
assert_equal(len(w), 1)
assert_(issubclass(w[-1].category, RuntimeWarning))
def test_whiten_not_finite(self):
for tp in np.array, matrix:
for bad_value in np.nan, np.inf, -np.inf:
obs = tp([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
class TestVq:
def test_py_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
label1 = py_vq(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
label1, dist = _vq.vq(tp(X), tp(initc))
assert_array_equal(label1, LABEL1)
tlabel1, tdist = vq(tp(X), tp(initc))
def test_vq_1d(self):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
assert_array_equal(a, ta)
assert_array_equal(b, tb)
def test__vq_sametype(self):
a = np.array([1.0, 2.0], dtype=np.float64)
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
def test__vq_invalid_type(self):
a = np.array([1, 2], dtype=int)
assert_raises(TypeError, _vq.vq, a, a)
def test_vq_large_nfeat(self):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
def test_vq_large_features(self):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
class TestKMean:
def test_large_features(self):
        # Generate a data set with large values, and run kmeans on it to
        # check that it does not crash (regression test for gh-1077).
d = 300
n = 100
m1 = np.random.randn(d)
m2 = np.random.randn(d)
x = 10000 * np.random.randn(n, d) - 20000 * m1
y = 10000 * np.random.randn(n, d) + 20000 * m2
data = np.empty((x.shape[0] + y.shape[0], d), np.double)
data[:x.shape[0]] = x
data[x.shape[0]:] = y
kmeans(data, 2)
def test_kmeans_simple(self):
np.random.seed(54321)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
code1 = kmeans(tp(X), tp(initc), iter=1)[0]
assert_array_almost_equal(code1, CODET2)
def test_kmeans_lost_cluster(self):
# This will cause kmeans to have a cluster with no points.
data = TESTDATA_2D
initk = np.array([[-1.8127404, -0.67128041],
[2.04621601, 0.07401111],
[-2.31149087, -0.05160469]])
kmeans(data, initk)
with suppress_warnings() as sup:
sup.filter(UserWarning,
"One of the clusters is empty. Re-run kmeans with a "
"different initialization")
kmeans2(data, initk, missing='warn')
assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
def test_kmeans2_simple(self):
np.random.seed(12345678)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
assert_array_almost_equal(code1, CODET1)
assert_array_almost_equal(code2, CODET2)
def test_kmeans2_rank1(self):
data = TESTDATA_2D
data1 = data[:, 0]
initc = data1[:3]
code = initc.copy()
kmeans2(data1, code, iter=1)[0]
kmeans2(data1, code, iter=2)[0]
def test_kmeans2_rank1_2(self):
data = TESTDATA_2D
data1 = data[:, 0]
kmeans2(data1, 2, iter=1)
def test_kmeans2_high_dim(self):
# test kmeans2 when the number of dimensions exceeds the number
# of input points
data = TESTDATA_2D
data = data.reshape((20, 20))[:10]
kmeans2(data, 2)
def test_kmeans2_init(self):
np.random.seed(12345)
data = TESTDATA_2D
kmeans2(data, 3, minit='points')
kmeans2(data[:, :1], 3, minit='points') # special case (1-D)
kmeans2(data, 3, minit='++')
kmeans2(data[:, :1], 3, minit='++') # special case (1-D)
# minit='random' can give warnings, filter those
with suppress_warnings() as sup:
sup.filter(message="One of the clusters is empty. Re-run.")
kmeans2(data, 3, minit='random')
kmeans2(data[:, :1], 3, minit='random') # special case (1-D)
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MemoryError in Wine.')
def test_krandinit(self):
data = TESTDATA_2D
datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]]
k = int(1e6)
for data in datas:
np.random.seed(1234)
init = _krandinit(data, k)
orig_cov = np.cov(data, rowvar=0)
init_cov = np.cov(init, rowvar=0)
assert_allclose(orig_cov, init_cov, atol=1e-2)
def test_kmeans2_empty(self):
# Regression test for gh-1032.
assert_raises(ValueError, kmeans2, [], 2)
def test_kmeans_0k(self):
# Regression test for gh-1073: fail when k arg is 0.
assert_raises(ValueError, kmeans, X, 0)
assert_raises(ValueError, kmeans2, X, 0)
assert_raises(ValueError, kmeans2, X, np.array([]))
def test_kmeans_large_thres(self):
# Regression test for gh-1774
x = np.array([1, 2, 3, 4, 10], dtype=float)
res = kmeans(x, 1, thresh=1e16)
assert_allclose(res[0], np.array([4.]))
assert_allclose(res[1], 2.3999999999999999)
def test_kmeans2_kpp_low_dim(self):
# Regression test for gh-11462
prev_res = np.array([[-1.95266667, 0.898],
[-3.153375, 3.3945]])
np.random.seed(42)
res, _ = kmeans2(TESTDATA_2D, 2, minit='++')
assert_allclose(res, prev_res)
def test_kmeans2_kpp_high_dim(self):
# Regression test for gh-11462
n_dim = 100
size = 10
centers = np.vstack([5 * np.ones(n_dim),
-5 * np.ones(n_dim)])
np.random.seed(42)
data = np.vstack([
np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
])
res, _ = kmeans2(data, 2, minit='++')
assert_array_almost_equal(res, centers, decimal=0)
| bsd-3-clause |
dgjustice/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_net.py | 15 | 10252 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
      - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
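# A hypothetical follow-up example (not part of the original module docs):
# remove the VPC created above, using only options documented by this module.
- ec2_vpc_net:
    name: Module_dev2
    cidr_block: 10.10.0.0/16
    region: us-east-1
    state: absent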
'''
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns None or a vpc object depending on the existence of a VPC. When supplied
    with a CIDR, it will check for matching tags to determine if it is a match;
    otherwise it will assume the VPC does not exist and thus return None.
"""
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception as e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if multi:
return None
elif len(matching_vpcs) == 1:
matched_vpc = matching_vpcs[0]
elif len(matching_vpcs) > 1:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
if tags is None:
tags = dict()
tags.update({'Name': name})
try:
current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
if tags != current_tags:
if not module.check_mode:
vpc.create_tags(vpc_obj.id, tags)
return True
else:
return False
except Exception as e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
if vpc_obj.dhcp_options_id != dhcp_id:
if not module.check_mode:
connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
return True
else:
return False
def get_vpc_values(vpc_obj):
if vpc_obj is not None:
vpc_values = vpc_obj.__dict__
if "region" in vpc_values:
vpc_values.pop("region")
if "item" in vpc_values:
vpc_values.pop("item")
if "connection" in vpc_values:
vpc_values.pop("connection")
return vpc_values
else:
return None
def main():
argument_spec=ec2_argument_spec()
argument_spec.update(dict(
name = dict(type='str', default=None, required=True),
cidr_block = dict(type='str', default=None, required=True),
tenancy = dict(choices=['default', 'dedicated'], default='default'),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
dhcp_opts_id = dict(type='str', default=None, required=False),
tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state = dict(choices=['present', 'absent'], default='present'),
multi_ok = dict(type='bool', default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
changed=False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if dns_hostnames and not dns_support:
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is None:
try:
changed = True
if not module.check_mode:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
else:
module.exit_json(changed=changed)
except BotoServerError as e:
module.fail_json(msg=e)
if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError as e:
module.fail_json(msg=e)
if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError as e:
module.fail_json(msg=e)
        # Note: Boto doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
        # the attribute each time; it is not used as a changed-factor.
try:
if not module.check_mode:
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError as e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if not module.check_mode:
# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError as e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
elif state == 'absent':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is not None:
try:
if not module.check_mode:
connection.delete_vpc(vpc_obj.id)
vpc_obj = None
changed = True
except BotoServerError as e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
cmauec/Cloud-Vision-Api | cloudstorage/rest_api.py | 9 | 9146 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Base and helper classes for Google RESTful APIs."""
__all__ = ['add_sync_methods']
import logging
import os
import random
import time
from . import api_utils
try:
from google.appengine.api import app_identity
from google.appengine.api import lib_config
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import app_identity
from google.appengine.api import lib_config
from google.appengine.ext import ndb
@ndb.tasklet
def _make_token_async(scopes, service_account_id):
"""Get a fresh authentication token.
Args:
scopes: A list of scopes.
service_account_id: Internal-use only.
Raises:
An ndb.Return with a tuple (token, expiration_time) where expiration_time is
seconds since the epoch.
"""
rpc = app_identity.create_rpc()
app_identity.make_get_access_token_call(rpc, scopes, service_account_id)
token, expires_at = yield rpc
raise ndb.Return((token, expires_at))
class _ConfigDefaults(object):
TOKEN_MAKER = _make_token_async
_config = lib_config.register('cloudstorage', _ConfigDefaults.__dict__)
def _make_sync_method(name):
"""Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
"""
def sync_wrapper(self, *args, **kwds):
method = getattr(self, name)
future = method(*args, **kwds)
return future.get_result()
return sync_wrapper
def add_sync_methods(cls):
"""Class decorator to add synchronous methods corresponding to async methods.
This modifies the class in place, adding additional methods to it.
If a synchronous method of a given name already exists it is not
replaced.
Args:
cls: A class.
Returns:
The same class, modified in place.
"""
for name in cls.__dict__.keys():
if name.endswith('_async'):
sync_name = name[:-6]
if not hasattr(cls, sync_name):
setattr(cls, sync_name, _make_sync_method(name))
return cls
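# Illustrative sketch (added comment, not part of the original library): since
# _RestApi below defines do_request_async(), get_token_async() and
# urlfetch_async(), the add_sync_methods() call at the bottom of this module
# synthesizes matching do_request(), get_token() and urlfetch() wrappers.
# Hypothetical caller code could therefore look like:
#
#   api = _RestApi('https://www.googleapis.com/auth/devstorage.read_only')
#   status, headers, content = api.do_request(
#       'https://storage.googleapis.com/my-bucket/my-object')
#
# which is equivalent to calling api.do_request_async(...).get_result().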
class _AE_TokenStorage_(ndb.Model):
"""Entity to store app_identity tokens in memcache."""
token = ndb.StringProperty()
expires = ndb.FloatProperty()
class _RestApi(object):
"""Base class for REST-based API wrapper classes.
This class manages authentication tokens and request retries. All
APIs are available as synchronous and async methods; synchronous
methods are synthesized from async ones by the add_sync_methods()
function in this module.
WARNING: Do NOT directly use this api. It's an implementation detail
and is subject to change at any release.
"""
def __init__(self, scopes, service_account_id=None, token_maker=None,
retry_params=None):
"""Constructor.
Args:
scopes: A scope or a list of scopes.
service_account_id: Internal use only.
token_maker: An asynchronous function of the form
(scopes, service_account_id) -> (token, expires).
retry_params: An instance of api_utils.RetryParams. If None, the
default for current thread will be used.
"""
if isinstance(scopes, basestring):
scopes = [scopes]
self.scopes = scopes
self.service_account_id = service_account_id
self.make_token_async = token_maker or _config.TOKEN_MAKER
if not retry_params:
retry_params = api_utils._get_default_retry_params()
self.retry_params = retry_params
self.user_agent = {'User-Agent': retry_params._user_agent}
self.expiration_headroom = random.randint(60, 240)
def __getstate__(self):
"""Store state as part of serialization/pickling."""
return {'scopes': self.scopes,
'id': self.service_account_id,
'a_maker': (None if self.make_token_async == _make_token_async
else self.make_token_async),
'retry_params': self.retry_params,
'expiration_headroom': self.expiration_headroom}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling."""
self.__init__(state['scopes'],
service_account_id=state['id'],
token_maker=state['a_maker'],
retry_params=state['retry_params'])
self.expiration_headroom = state['expiration_headroom']
@ndb.tasklet
def do_request_async(self, url, method='GET', headers=None, payload=None,
deadline=None, callback=None):
"""Issue one HTTP request.
It performs async retries using tasklets.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
Yields:
The async fetch of the url.
"""
retry_wrapper = api_utils._RetryWrapper(
self.retry_params,
retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS,
should_retry=api_utils._should_retry)
resp = yield retry_wrapper.run(
self.urlfetch_async,
url=url,
method=method,
headers=headers,
payload=payload,
deadline=deadline,
callback=callback,
follow_redirects=False)
raise ndb.Return((resp.status_code, resp.headers, resp.content))
@ndb.tasklet
def get_token_async(self, refresh=False):
"""Get an authentication token.
The token is cached in memcache, keyed by the scopes argument.
Uses a random token expiration headroom value generated in the constructor
to eliminate a burst of GET_ACCESS_TOKEN API requests.
Args:
refresh: If True, ignore a cached token; default False.
Yields:
An authentication token. This token is guaranteed to be non-expired.
"""
key = '%s,%s' % (self.service_account_id, ','.join(self.scopes))
ts = yield _AE_TokenStorage_.get_by_id_async(
key,
use_cache=True,
use_memcache=self.retry_params.memcache_access_token,
use_datastore=self.retry_params.save_access_token)
if refresh or ts is None or ts.expires < (
time.time() + self.expiration_headroom):
token, expires_at = yield self.make_token_async(
self.scopes, self.service_account_id)
timeout = int(expires_at - time.time())
ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at)
if timeout > 0:
yield ts.put_async(memcache_timeout=timeout,
use_datastore=self.retry_params.save_access_token,
force_writes=True,
use_cache=True,
use_memcache=self.retry_params.memcache_access_token)
raise ndb.Return(ts.token)
@ndb.tasklet
def urlfetch_async(self, url, method='GET', headers=None,
payload=None, deadline=None, callback=None,
follow_redirects=False):
"""Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
This returns a Future despite not being decorated with @ndb.tasklet!
"""
headers = {} if headers is None else dict(headers)
headers.update(self.user_agent)
try:
self.token = yield self.get_token_async()
except app_identity.InternalError, e:
if os.environ.get('DATACENTER', '').endswith('sandman'):
self.token = None
logging.warning('Could not fetch an authentication token in sandman '
'based Appengine devel setup; proceeding without one.')
else:
raise e
if self.token:
headers['authorization'] = 'OAuth ' + self.token
deadline = deadline or self.retry_params.urlfetch_timeout
ctx = ndb.get_context()
resp = yield ctx.urlfetch(
url, payload=payload, method=method,
headers=headers, follow_redirects=follow_redirects,
deadline=deadline, callback=callback)
raise ndb.Return(resp)
_RestApi = add_sync_methods(_RestApi)
| gpl-2.0 |
cntnboys/410Lab4 | env-lab4/lib/python2.7/site-packages/markupsafe/tests.py | 674 | 6107 | # -*- coding: utf-8 -*-
import gc
import sys
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
def test_adding(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
def test_string_interpolation(self):
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em>&lt;bad user&gt;</em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
assert Markup('%i') % 3.14 == '3'
assert Markup('%.2f') % 3.14 == '3.14'
def test_type_behavior(self):
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
def test_html_interop(self):
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
__str__ = __unicode__
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
def test_tuple_interpol(self):
self.assertEqual(Markup('<em>%s:%s</em>') % (
'<foo>',
'<bar>',
        ), Markup(u'<em>&lt;foo&gt;:&lt;bar&gt;</em>'))
def test_dict_interpol(self):
self.assertEqual(Markup('<em>%(foo)s</em>') % {
'foo': '<foo>',
        }, Markup(u'<em>&lt;foo&gt;</em>'))
self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
'foo': '<foo>',
'bar': '<bar>',
        }, Markup(u'<em>&lt;foo&gt;:&lt;bar&gt;</em>'))
def test_escaping(self):
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_formatting(self):
for actual, expected in (
(Markup('%i') % 3.14, '3'),
(Markup('%.2f') % 3.14159, '3.14'),
            (Markup('%s %s %s') % ('<', 123, '>'), '&lt; 123 &gt;'),
(Markup('<em>{awesome}</em>').format(awesome='<awesome>'),
             '<em>&lt;awesome&gt;</em>'),
(Markup('{0[1][bar]}').format([0, {'bar': '<bar/>'}]),
             '&lt;bar/&gt;'),
(Markup('{0[1][bar]}').format([0, {'bar': Markup('<bar/>')}]),
'<bar/>')):
assert actual == expected, "%r should be %r!" % (actual, expected)
# This is new in 2.7
if sys.version_info >= (2, 7):
def test_formatting_empty(self):
formatted = Markup('{}').format(0)
assert formatted == Markup('0')
def test_custom_formatting(self):
class HasHTMLOnly(object):
def __html__(self):
return Markup('<foo>')
class HasHTMLAndFormat(object):
def __html__(self):
return Markup('<foo>')
def __html_format__(self, spec):
return Markup('<FORMAT>')
assert Markup('{0}').format(HasHTMLOnly()) == Markup('<foo>')
assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('<FORMAT>')
def test_complex_custom_formatting(self):
class User(object):
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id,
self.__html__(),
)
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup('<span class=user>{0}</span>').format(self.username)
user = User(1, 'foo')
assert Markup('<p>User: {0:link}').format(user) == \
Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
        assert escape_silent('<foo>') == Markup(u'&lt;foo&gt;')
def test_splitting(self):
self.assertEqual(Markup('a b').split(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a b').rsplit(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a\nb').splitlines(), [
Markup('a'),
Markup('b')
])
def test_mul(self):
self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
def test_markup_leaks(self):
counts = set()
for count in range(20):
for item in range(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MarkupTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# vim:sts=4:sw=4:et:
| apache-2.0 |
PrincetonUniversity/AdvNet-OF_Scripts | evaluation/switch/flowmod_test/pox/pox/misc/packet_dump.py | 26 | 2913 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
A simple component that dumps packet_in info to the log.
Use --verbose for really verbose dumps.
Use --show to show all packets.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
from pox.lib.util import dpidToStr
log = core.getLogger()
_verbose = None
_max_length = None
_types = None
_show_by_default = None
def _handle_PacketIn (event):
packet = event.parsed
show = _show_by_default
p = packet
while p:
if p.__class__.__name__.lower() in _types:
if _show_by_default:
# This packet is hidden
return
else:
# This packet should be shown
show = True
break
return
if not hasattr(p, 'next'): break
p = p.next
if not show: return
msg = dpidToStr(event.dpid) + ": "
msg = ""
if _verbose:
msg += packet.dump()
else:
p = packet
while p:
if isinstance(p, basestring):
msg += "[%s bytes]" % (len(p),)
break
msg += "[%s]" % (p.__class__.__name__,)
p = p.next
if _max_length:
if len(msg) > _max_length:
msg = msg[:_max_length-3]
msg += "..."
core.getLogger("dump:" + dpidToStr(event.dpid)).debug(msg)
def launch (verbose = False, max_length = 110, full_packets = True,
hide = False, show = False):
global _verbose, _max_length, _types, _show_by_default
_verbose = verbose
_max_length = max_length
force_show = (show is True) or (hide is False and show is False)
if isinstance(hide, basestring):
hide = hide.replace(',', ' ').replace('|', ' ')
hide = set([p.lower() for p in hide.split()])
else:
hide = set()
if isinstance(show, basestring):
show = show.replace(',', ' ').replace('|', ' ')
show = set([p.lower() for p in show.split()])
else:
show = set()
if hide and show:
raise RuntimeError("Can't both show and hide packet types")
if show:
_types = show
else:
_types = hide
_show_by_default = not not hide
if force_show:
_show_by_default = force_show
if full_packets:
# Send full packets to controller
core.openflow.miss_send_len = 0xffff
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Packet dumper running")
| apache-2.0 |
SujaySKumar/django | tests/forms_tests/field_tests/test_datefield.py | 156 | 4346 | from datetime import date
from django.forms import DateField, Form, HiddenInput, SelectDateWidget
from django.test import SimpleTestCase, override_settings
from django.utils import translation
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
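# Explanatory note (added, not part of the original test module):
# SelectDateWidget renders three <select> inputs named <field>_month,
# <field>_day and <field>_year, which is why the tests below post
# 'mydate_month', 'mydate_day' and 'mydate_year' rather than a single
# 'mydate' value, while as_hidden() falls back to one combined 'mydate' input.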
class DateFieldTest(SimpleTestCase):
def test_form_field(self):
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict(), we must
# accept the input from the "as_hidden" rendering as well.
self.assertHTMLEqual(
a['mydate'].as_hidden(),
'<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />',
)
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_date_changed(self):
"""
Ensure that DateField.has_changed() with SelectDateWidget works
correctly with a localized date format (#17165).
"""
# With Field.show_hidden_initial=False
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput()._format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(date(2008, 4, 22)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_day">', a.as_p())
| bsd-3-clause |
drwyrm/Flexget | flexget/plugins/output/sabnzbd.py | 7 | 3554 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlencode
import logging
from flexget import plugin
from flexget.event import event
from requests import RequestException
log = logging.getLogger('sabnzbd')
class OutputSabnzbd(object):
"""
Example::
sabnzbd:
apikey: 123456
url: http://localhost/sabnzbd/api?
category: movies
All parameters::
sabnzbd:
apikey: ...
url: ...
category: ...
script: ...
pp: ...
priority: ...
"""
schema = {
'type': 'object',
'properties': {
'key': {'type': 'string'},
'url': {'type': 'string', 'format': 'url'},
'category': {'type': 'string'},
'script': {'type': 'string'},
'pp': {'type': 'string'},
'priority': {'type': 'integer'},
'password': {'type': 'string'},
'username': {'type': 'string'},
},
'required': ['key', 'url'],
'additionalProperties': False,
}
def get_params(self, config):
params = {}
if 'key' in config:
params['apikey'] = config['key']
if 'category' in config:
params['cat'] = '%s' % config['category']
if 'script' in config:
params['script'] = config['script']
if 'pp' in config:
params['pp'] = config['pp']
if 'priority' in config:
params['priority'] = config['priority']
if 'username' in config:
params['ma_username'] = config['username']
if 'password' in config:
params['ma_password'] = config['password']
params['mode'] = 'addurl'
return params
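    # Illustrative sketch (added comment, not part of the original plugin):
    # with the example config from the class docstring, get_params() plus the
    # per-entry fields set in on_task_output() would yield a request roughly
    # like
    #
    #   http://localhost/sabnzbd/api?apikey=123456&cat=movies&mode=addurl
    #       &name=<nzb url>&nzbname=<cleaned title>
    #
    # (<nzb url> and <cleaned title> are placeholders; parameter order depends
    # on urlencode).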
def on_task_output(self, task, config):
for entry in task.accepted:
if task.options.test:
log.info('Would add into sabnzbd: %s' % entry['title'])
continue
params = self.get_params(config)
# allow overriding the category
if 'category' in entry:
# Dirty hack over the next few lines to strip out non-ascii
# chars. We're going to urlencode this, which causes
# serious issues in python2.x if it's not ascii input.
params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])
params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])
# add cleaner nzb name (undocumented api feature)
params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])
request_url = config['url'] + urlencode(params)
log.debug('request_url: %s' % request_url)
try:
response = task.requests.get(request_url)
except RequestException as e:
log.critical('Failed to use sabnzbd. Requested %s' % request_url)
log.critical('Result was: %s' % e.args[0])
entry.fail('sabnzbd unreachable')
if task.options.debug:
log.exception(e)
continue
if 'error' in response.text.lower():
entry.fail(response.text.replace('\n', ''))
else:
log.info('Added `%s` to SABnzbd' % (entry['title']))
@event('plugin.register')
def register_plugin():
plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)
| mit |
ferriman/SSandSP | pythonscripts/eliza.py | 2 | 7991 | import re
responseStarts = []
responseCurrentIndices = []
responseEnds = []
previousInput = ""
userInput = ""
CONVERSATION_KEYWORDS = ["CAN YOU", "CAN I", "YOU ARE", "YOURE", "I DONT", "I FEEL", "WHY DONT YOU", "WHY CANT I","ARE YOU", "I CANT", "I AM", " IM ", "YOU", "I WANT", "WHAT", "HOW", "WHO", "WHERE","WHEN", "WHY", "NAME", "CAUSE", "SORRY", "DREAM", "HELLO", "HI", "MAYBE", "NO", "YOUR","ALWAYS", "THINK", "ALIKE", "YES", "FRIEND", "COMPUTER", "NOKEYFOUND"]
WORDS_TO_REPLACE = ["ARE","AM","WERE","WAS","YOU","I","YOUR","MY","IVE","YOUVE","IM","YOURE", "YOU", "ME"]
QUESTIONS = [
"DON'T YOU BELIEVE THAT I CAN.", "PERHAPS YOU WOULD LIKE TO BE ABLE TO.",
"YOU WANT ME TO BE ABLE TO*", "PERHAPS YOU DON'T WANT TO*",
"DO YOU WANT TO BE ABLE TO*", "WHAT MAKES YOU THINK I AM*",
"DOES IT PLEASE YOU TO BELIEVE I AM*", "PERHAPS YOU WOULD LIKE TO BE*",
"DO YOU SOMETIMES WISH YOU WERE*", "DON'T YOU REALLY*", "WHY DON'T YOU*",
"DO YOU WISH TO BE ABLE TO*", "DOES THAT TROUBLE YOU?",
"TELL ME MORE ABOUT SUCH FEELINGS*", "DO YOU OFTEN FEEL*",
"DO YOU ENJOY FEELING*", "DO YOU REALLY BELIEVE I DON'T*",
"PERHAPS IN TIME I WILL*", "DO YOU WANT ME TO*",
"DO YOU THINK YOU SHOULD BE ABLE TO*", "WHY CAN'T YOU*",
"WHAT MAKES YOU WONDER WHETHER OR NOT I AM*",
"WOULD YOU PREFER IF I WERE NOT*", "PERHAPS IN YOUR FANTASIES I AM*",
"HOW DO YOU KNOW YOU CAN'T*", "HAVE YOU TRIED?", "PERHAPS YOU CAN NOW*",
"DID YOU COME TO ME BECAUSE YOU ARE*", "HOW LONG HAVE YOU BEEN*",
"DO YOU BELIEVE IT IS NORMAL TO BE*", "DO YOU ENJOY BEING*",
"I AM MORE INTERESTED IN TALKING ABOUT YOU.", "OH, I*",
"ARE YOU REALLY SO INTERESTED IN SOMEONE LIKE ME?",
"WHAT WOULD IT MEAN TO YOU IF YOU GOT*", "WHY DO YOU WANT*",
"SUPPOSE YOU SOON GOT*", "WHAT IF YOU NEVER GOT*", "I SOMETIMES ALSO WANT*",
"WHY DO YOU ASK?", "DOES THAT QUESTION INTEREST YOU?",
"WHAT ANSWER WOULD PLEASE YOU THE MOST?", "WHAT DO YOU THINK?",
"ARE SUCH QUESTIONS ON YOUR MIND OFTEN?",
"WHAT IS IT THAT YOU REALLY WANT TO KNOW?", "HAVE YOU ASKED ANYONE ELSE?",
"HAVE YOU ASKED SUCH QUESTIONS BEFORE?",
"WHAT ELSE COMES TO MIND WHEN YOU ASK THAT?", "WE CAN KEEP THIS ANONYMOUS.",
"NO NEED TO SPECIFY ANY NAMES-- PLEASE GO ON.", "IS THAT THE REAL REASON?",
"DON'T ANY OTHER REASONS COME TO MIND?",
"DOES THAT REASON EXPLAIN ANYTHING ELSE?", "WHAT OTHER REASONS MIGHT THERE BE?",
"PLEASE DON'T APOLOGIZE.", "APOLOGIES ARE NOT NECESSARY.",
"WHAT FEELINGS DO YOU HAVE WHEN YOU APOLOGIZE?", "NO NEED TO BE DEFENSIVE!",
"WHAT DOES THAT DREAM SUGGEST TO YOU?", "DO YOU DREAM OFTEN?",
"WHAT PERSONS APPEAR IN YOUR DREAMS?", "DO YOU HAVE PLEASANT DREAMS?",
"HOW DO YOU DO ... PLEASE STATE YOUR PROBLEM.", "YOU DON'T SEEM QUITE CERTAIN.",
"WHY THE UNCERTAIN TONE?", "LET'S TRY TO KEEP THIS POSITIVE.", "YOU AREN'T SURE?",
"DON'T YOU KNOW?", "IS THAT A DEFINITE NO OR MIGHT YOU CHANGE YOUR MIND?",
"I AM SENSING SOME NEGATIVITY.", "WHY NOT?", "ARE YOU SURE?", "WHY NO?",
"WHY ARE YOU CONCERNED ABOUT MY*", "WHAT ABOUT YOUR OWN*",
"CAN'T YOU THINK OF A SPECIFIC EXAMPLE?", "WHEN?", "WHAT ARE YOU THINKING OF?",
"REALLY. ALWAYS?", "DO YOU REALLY THINK SO?", "BUT YOU ARE NOT SURE YOU.",
"BELIEVE IN YOURSELF.", "IN WHAT WAY?", "WHAT RESEMBLANCE DO YOU SEE?",
"WHAT DOES THE SIMILARITY SUGGEST TO YOU?",
"WHAT OTHER CONNECTIONS DO YOU SEE?", "COULD THERE REALLY BE SOME CONNECTION?",
"HOW?", "YOU SEEM QUITE POSITIVE.", "ARE YOU SURE?", "I SEE.", "I UNDERSTAND.",
"TELL ME ABOUT YOUR FRIENDS.", "ARE YOU WORRIED ABOUT YOUR FRIENDS?",
"DO YOUR FRIENDS EVER GIVE YOU A HARD TIME?", "WHAT DO YOU LIKE ABOUT YOUR FRIENDS?",
"DO YOU LOVE YOUR FRIENDS?", "PERHAPS YOUR LOVE FOR FRIENDS WORRIES YOU.",
"DO COMPUTERS EXCITE YOU?", "ARE YOU TALKING ABOUT ME IN PARTICULAR?",
"HOW DO YOU LIKE YOUR WATCH?", "WHY DO YOU MENTION COMPUTERS?",
"DO YOU FIND MACHINES AS FASCINATING AS I DO?",
"DON'T YOU THINK COMPUTERS CAN HELP PEOPLE?",
"WHAT ABOUT MACHINES EXCITES YOU THE MOST?",
"HEY THERE, HOW CAN I HELP YOU?",
"WHAT DOES THAT SUGGEST TO YOU?", "I SEE.",
"I'M NOT SURE I UNDERSTAND YOU FULLY.", "COME COME ELUCIDATE YOUR THOUGHTS.",
"CAN YOU ELABORATE ON THAT?", "THAT IS QUITE INTERESTING."]
# pairs of numbers (start of reply string, number of reply strings)
CONVERSATION_TO_RESPONSES_MAP = [
1,3,4,2,6,4,6,4,10,4,14,3,17,3,20,2,22,3,25,3,
28,4,28,4,32,3,35,5,40,9,40,9,40,9,40,9,40,9,40,9,
49,2,51,4,55,4,59,4,63,1,63,1,64,5,69,5,74,2,76,4,
80,3,83,7,90,3,93,6,99,7,106,6]
# response arrays init
for i in xrange(0,len(CONVERSATION_TO_RESPONSES_MAP)/2):
responseStarts.append(CONVERSATION_TO_RESPONSES_MAP[2 * i]) # start of reply string
responseCurrentIndices.append(CONVERSATION_TO_RESPONSES_MAP[2 * i]) # start at first position
responseEnds.append(responseStarts[i] + CONVERSATION_TO_RESPONSES_MAP[2 * i + 1]) # number of reply strings
def elizabot(inputText):
result = ""
global previousInput
inputText = " " + inputText.upper().replace("'", "") + " " # reformat inputText, remove the '
if previousInput != " " and inputText == previousInput: # repeat the last sentence?
return "DIDN'T YOU JUST SAY THAT?"
previousInput = inputText
# search keywords on inputText
keywordIndex = 0
while keywordIndex < len(CONVERSATION_KEYWORDS):
index=inputText.find(CONVERSATION_KEYWORDS[keywordIndex])
if index != -1:
break
keywordIndex=keywordIndex+1
afterKeyword = ""
# now, keywordIndex has the first keyword found in inputText, 36 if not any
if keywordIndex == len(CONVERSATION_KEYWORDS):
keywordIndex = 35 # 36 -> 35
else:
index=inputText.find(CONVERSATION_KEYWORDS[keywordIndex])
afterKeyword = inputText[index+len(CONVERSATION_KEYWORDS[keywordIndex]):] # get the input text after the keyword
parts = re.split("\s+", afterKeyword) # afterKeyword is splited by word
for i in xrange (0,len(WORDS_TO_REPLACE)/2): # go through the list of words to replace
first = WORDS_TO_REPLACE[i * 2] # original word
second = WORDS_TO_REPLACE[i * 2 + 1] # replaced by...
for j in xrange (0,len(parts)): # replacing all the tenses words found on inputText parts
if parts[j]==first:
parts[j]= second
else:
if parts[j]==second:
parts[j]=first
afterKeyword = str.join(" ",parts) # join string again
question = QUESTIONS[responseCurrentIndices[keywordIndex] - 1] # map the expresion used by the user with a proper answer/question sequence
responseCurrentIndices[keywordIndex] = responseCurrentIndices[keywordIndex] + 1; # change the content of responseCurrentIndices, >> right
if responseCurrentIndices[keywordIndex] > responseEnds[keywordIndex]: # if the sequence ends...
responseCurrentIndices[keywordIndex] = responseStarts[keywordIndex] # ... start again
result = result + question
if result.endswith("*"): # if question ends with a *, uses the inputText as part of the response
result = result[:-1]
result = result + " " + afterKeyword;
return result
print "Hi!, I'm a basic chat bot called ELIZA, your Rogerian psychotherapist."
while userInput!="quit": # input loop
userInput=raw_input('#>')
print elizabot(userInput)
| gpl-3.0 |
StuartLittlefair/astropy | astropy/modeling/tests/test_constraints.py | 1 | 22843 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import types
import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.random import RandomState
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.modeling import models
from astropy.modeling import fitting
from astropy.utils.exceptions import AstropyUserWarning
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, 13, stddev=.4)
self.x = np.arange(10, 20, .1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = RandomState(1234567890)
self.n = rsn.randn(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif('not HAS_SCIPY')
def test_fixed_par(self):
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3,
fixed={'amplitude': True})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif('not HAS_SCIPY')
def test_tied_par(self):
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev,
rtol=10 ** (-5))
@pytest.mark.skipif('not HAS_SCIPY')
def test_joint_fitter(self):
g1 = models.Gaussian1D(10, 14.9, stddev=.3)
g2 = models.Gaussian1D(10, 13, stddev=.4)
jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'],
g2: ['amplitude']}, [9.8])
x = np.arange(10, 20, .1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1,
compmodel(p[0], p[3:], x2) - y2])
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_no_constraints(self):
g1 = models.Gaussian1D(9.9, 14.5, stddev=.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif('not HAS_SCIPY')
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0, 443.0,
416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0, 428.0])
self.data = data.reshape(11, 11)
def test_bounds_lsq(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with pytest.warns(AstropyUserWarning, match='consider using linear fitting methods'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_gauss2d_lsq(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match='The fit may be unsuccessful'):
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.SLSQPLSQFitter()
# Warning does not appear in all the CI jobs.
# TODO: Rewrite the test for more consistent warning behavior.
with pytest.warns(None) as warning_lines:
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
for w in warning_lines:
assert issubclass(w.category, AstropyUserWarning)
assert 'The fit may be unsuccessful' in str(w.message)
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0., 9.]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = RandomState(1234567890)
self.n = rsn.randn(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied['amplitude'], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
gauss.mean.fixed = False
assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
gauss.amplitude.tied = False
assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False}
def test_set_bounds_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, None)})
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (None, None)}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {'a': (None, None), 'b': (0, None)}
assert m.fixed == {'a': False, 'b': True}
# Make a model instance that overrides the default constraints and values
m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)},
fixed={'a': True, 'b': False})
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {'a': (1, None), 'b': (2, None)}
assert m.fixed == {'a': True, 'b': False}
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_fixed_and_bound_constraints():
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
m = models.Gaussian1D(amplitude=3, mean=4, stddev=1,
bounds={'mean': (4, 5)},
fixed={'amplitude': True})
x = np.linspace(0, 10, 10)
y = np.exp(-x ** 2 / 2)
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
_ = f(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_bound_constraints_estimate_jacobian():
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
f2 = fitting.LevMarLSQFitter()
_ = f2(m2, x, y)
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
    # Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are so long as they're not all zero).
assert np.any(f2.fit_info['fjac'] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian2d_positive_stddev():
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]]
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
fitter = fitting.LevMarLSQFitter()
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
# Issue #6403
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
def test_2d_model():
from astropy.utils import NumpyRNGContext
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
fitter = fitting.LevMarLSQFitter()
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_set_prior_posterior():
model = models.Polynomial1D(1)
model.c0.prior = models.Gaussian1D(2.3, 2, .1)
assert model.c0.prior(2) == 2.3
model.c0.posterior = models.Linear1D(1, .2)
assert model.c0.posterior(1) == 1.2
def test_set_constraints():
g = models.Gaussian1D()
p = models.Polynomial1D(1)
# Set bounds before model combination
g.stddev.bounds = (0, 3)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (0.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set bounds on the compound model
m.stddev_0.bounds = (1, 3)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter directly in the bounds dict
m.bounds['stddev_0'] = (4, 5)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (4, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter on the child model bounds dict
g.bounds['stddev'] = (1, 5)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
| bsd-3-clause |
luniv/servo | python/mach/mach/base.py | 22 | 3492 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
class CommandContext(object):
"""Holds run-time state so it can easily be passed to command providers."""
def __init__(self, cwd=None, settings=None, log_manager=None,
commands=None, **kwargs):
self.cwd = cwd
self.settings = settings
self.log_manager = log_manager
self.commands = commands
for k,v in kwargs.items():
setattr(self, k, v)
class MachError(Exception):
"""Base class for all errors raised by mach itself."""
class NoCommandError(MachError):
"""No command was passed into mach."""
class UnknownCommandError(MachError):
"""Raised when we attempted to execute an unknown command."""
def __init__(self, command, verb, suggested_commands=None):
MachError.__init__(self)
self.command = command
self.verb = verb
self.suggested_commands = suggested_commands or []
class UnrecognizedArgumentError(MachError):
"""Raised when an unknown argument is passed to mach."""
def __init__(self, command, arguments):
MachError.__init__(self)
self.command = command
self.arguments = arguments
class MethodHandler(object):
"""Describes a Python method that implements a mach command.
Instances of these are produced by mach when it processes classes
defining mach commands.
"""
__slots__ = (
# The Python class providing the command. This is the class type not
# an instance of the class. Mach will instantiate a new instance of
# the class if the command is executed.
'cls',
# Whether the __init__ method of the class should receive a mach
# context instance. This should only affect the mach driver and how
# it instantiates classes.
'pass_context',
# The name of the method providing the command. In other words, this
# is the str name of the attribute on the class type corresponding to
# the name of the function.
'method',
# The name of the command.
'name',
# String category this command belongs to.
'category',
# Description of the purpose of this command.
'description',
# Functions used to 'skip' commands if they don't meet the conditions
# in a given context.
'conditions',
# argparse.ArgumentParser instance to use as the basis for command
# arguments.
'parser',
# Arguments added to this command's parser. This is a 2-tuple of
# positional and named arguments, respectively.
'arguments',
# Argument groups added to this command's parser.
'argument_group_names',
)
def __init__(self, cls, method, name, category=None, description=None,
conditions=None, parser=None, arguments=None,
argument_group_names=None, pass_context=False):
self.cls = cls
self.method = method
self.name = name
self.category = category
self.description = description
self.conditions = conditions or []
self.parser = parser
self.arguments = arguments or []
self.argument_group_names = argument_group_names or []
self.pass_context = pass_context
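# Rough illustration (added comment, not part of this module): MethodHandler
# instances are normally produced when mach scans command classes written with
# the decorators in mach.decorators, along the lines of
#
#   @CommandProvider
#   class MyCommands(object):
#       @Command('doit', category='misc', description='Do the thing.')
#       def doit(self):
#           ...
#
# where each @Command-decorated method ends up described by one MethodHandler.
# (Decorator names here are an assumption based on typical mach setups.)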
| mpl-2.0 |
gojira/tensorflow | tensorflow/contrib/tpu/python/tpu/session_support.py | 16 | 12608 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class CoordinatorShutdownException(Exception):
"""Raised when the coordinator needs to shutdown."""
pass
class WorkerHeartbeatManager(object):
"""Manages the status/heartbeat monitor for a set of workers."""
def __init__(self, session, devices, heartbeat_ops, request_placeholder):
"""Construct a new WorkerHeartbeatManager.
(Prefer using `WorkerHeartbeatManager.from_devices` when possible.)
Args:
session: `tf.Session`, session to use for heartbeat operations.
devices: `list[string]` Set of devices to connect to.
heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
the WorkerHeartbeatRequest protocol buffer.
"""
self._session = session
self._devices = devices
self._ops = heartbeat_ops
self._request_placeholder = request_placeholder
@staticmethod
def from_devices(session, devices):
"""Construct a heartbeat manager for the given devices."""
if not devices:
logging.error('Trying to create heartbeat manager with no devices?')
logging.info('Creating heartbeat manager for %s', devices)
request_placeholder = array_ops.placeholder(
name='worker_heartbeat_request', dtype=dtypes.string)
heartbeat_ops = []
for device in devices:
with ops.device(device):
heartbeat_ops.append(tpu_ops.worker_heartbeat(request_placeholder))
return WorkerHeartbeatManager(session, devices, heartbeat_ops,
request_placeholder)
def heartbeat_supported(self):
"""Returns True if heartbeat operations are supported on all workers."""
try:
# Send ping to verify worker has heartbeat support.
self.ping()
return True
except errors.InvalidArgumentError as _:
return False
def configure(self, message):
"""Configure heartbeat manager for all devices.
Args:
message: `event_pb2.WorkerHeartbeatRequest`
Returns: `None`
"""
logging.info('Configuring worker heartbeat: %s',
text_format.MessageToString(message))
self._session.run(self._ops,
{self._request_placeholder: message.SerializeToString()})
def ping(self, request=None, timeout_in_ms=5000):
"""Ping all workers, returning the parsed status results."""
if request is None:
request = event_pb2.WorkerHeartbeatRequest()
options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
results = self._session.run(
self._ops,
feed_dict={self._request_placeholder: request.SerializeToString()},
options=options)
parsed_results = [
event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
for res_pb in results
]
logging.debug('Ping results: %s', parsed_results)
return parsed_results
def lame_workers(self):
"""Ping all workers, returning manager containing lame workers (or None)."""
ping_results = self.ping()
lame_workers = []
for ping_response, device, op in zip(ping_results, self._devices,
self._ops):
if ping_response.health_status != event_pb2.OK:
lame_workers.append((device, op))
if not lame_workers:
return None
bad_devices, bad_ops = zip(*lame_workers)
return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
self._request_placeholder)
def __repr__(self):
return 'HeartbeatManager(%s)' % ','.join(self._devices)
def shutdown(self, timeout_ms=10000):
"""Shutdown all workers after `shutdown_timeout_secs`."""
logging.info('Shutting down %s.', self)
req = event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=timeout_ms))
self.configure(req)
# Wait for workers to shutdown. This isn't strictly required
# but it avoids triggering multiple checkpoints with the same lame worker.
logging.info('Waiting %dms for worker shutdown.', timeout_ms)
time.sleep(timeout_ms / 1000)
def all_worker_devices(session):
"""Return a list of devices for each worker in the system."""
devices = session.list_devices()
return [device.name for device in devices if 'CPU' in device.name]
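# Illustrative sketch (not part of the original module): one way a caller
# might build a heartbeat manager for every CPU worker reachable from a
# session and log each worker's reported status. The choice to return early
# when heartbeats are unsupported is an assumption made for this example.
def _example_ping_all_workers(session):
  """Ping all CPU workers visible to `session` and log their health."""
  manager = WorkerHeartbeatManager.from_devices(
      session, all_worker_devices(session))
  if not manager.heartbeat_supported():
    logging.warn('Worker heartbeats unsupported; nothing to ping.')
    return
  for response in manager.ping(timeout_in_ms=5000):
    logging.info('Worker health status: %s', response.health_status)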
class WatchdogManager(threading.Thread):
"""Configures worker watchdog timer and handles periodic pings.
Usage:
# Ping workers every minute, shutting down workers if they haven't received
# a ping after 1 hour.
    watchdog_manager = WatchdogManager(
        session, ping_interval=60, shutdown_timeout=3600)
# Use as a context manager, resetting watchdog on context exit:
with watchdog_manager:
session.run(...)
# Or setup globally; watchdog will remain active until program exit.
watchdog_manager.configure_and_run()
"""
def __init__(self,
session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Initialize a watchdog manager.
Args:
session: Session connected to worker devices. A cloned session and graph
will be created for managing worker pings.
      devices: Set of devices to monitor. If None, all workers will be
monitored.
ping_interval: Time, in seconds, between watchdog pings.
shutdown_timeout: Time, in seconds, before watchdog timeout.
"""
threading.Thread.__init__(self)
self.ping_interval = ping_interval
self.shutdown_timeout = shutdown_timeout
self.daemon = True
self._running = False
self._graph = ops.Graph()
self._session = session_lib.Session(
target=session.sess_str,
graph=self._graph,
)
with self._graph.as_default():
if devices is None:
devices = all_worker_devices(self._session)
self._worker_manager = WorkerHeartbeatManager.from_devices(
self._session, devices)
def configure_and_run(self):
logging.info('Enabling worker watchdog.')
self._running = True
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(
timeout_ms=self.shutdown_timeout * 1000,)))
self.start()
def __enter__(self):
self.configure_and_run()
def __exit__(self, exc_type, exc_val, exc_tb):
logging.info('Disabling worker watchdog.')
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=-1,)))
self._running = False
self.join()
def run(self):
# Don't fetch logs or adjust timing: just ping the watchdog.
while self._running:
self._worker_manager.ping(request=None)
time.sleep(self.ping_interval)
class GracefulShutdownHook(session_run_hook.SessionRunHook):
"""Session hook that watches for shutdown events.
If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a
SystemShutdown exception is raised to terminate the main session. If `saver`
is None the `SAVERS` collection will be read to find a saver.
`on_shutdown_hooks` is an optional list of functions that should be called
after checkpointing. The function is called with (`run_context`,
`all_workers`, `lame_workers`).
If `heartbeat_group` is not specified, it will default to all CPU workers
in the system.
"""
def __init__(self, checkpoint_prefix, saver=None, on_shutdown_hooks=None):
self._saver = saver
self._checkpoint_prefix = checkpoint_prefix
self._on_shutdown_hooks = on_shutdown_hooks if on_shutdown_hooks else []
# Worker heartbeats are managed independently of the main training graph.
self._graph = ops.Graph()
self._workers = None
self._session = None
self._heartbeat_supported = False
def after_create_session(self, training_session, coord): # pylint: disable=unused-argument
# N.B. We have to pull the global step here to avoid it being unavailable
# at checkpoint time; the graph has been frozen at that point.
if training_util.get_global_step() is None and self.saver() is not None:
raise ValueError(
'Saver defined but no global step. Run `get_or_create_global_step()`'
' in your model definition to allow checkpointing.')
with self._graph.as_default():
logging.info('Installing graceful shutdown hook.')
self._session = session_lib.Session(
target=training_session.sess_str, graph=self._graph)
self._workers = WorkerHeartbeatManager.from_devices(
self._session, all_worker_devices(self._session))
self._heartbeat_supported = self._workers.heartbeat_supported()
if self._heartbeat_supported:
self._workers.configure(
event_pb2.WorkerHeartbeatRequest(
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
else:
logging.warn(
'Worker heartbeats not supported by all workers. No failure '
'handling will be enabled.'
)
def saver(self):
if self._saver:
return self._saver
savers = ops.get_collection(ops.GraphKeys.SAVERS)
if not savers:
return None
if not isinstance(savers, list):
return savers
if len(savers) > 1:
logging.error(
'Multiple savers in the SAVERS collection. On-demand checkpointing '
'will be disabled. Pass an explicit `saver` to the constructor to '
'override this behavior.'
)
return None
return savers[0]
def after_run(self, run_context, run_values):
del run_values
if not self._heartbeat_supported:
return
lame_workers = self._workers.lame_workers()
if lame_workers:
logging.info('ShutdownHook: lame workers found: %s', lame_workers)
if self.saver():
logging.info('ShutdownHook: saving checkpoint to %s',
self._checkpoint_prefix)
self.saver().save(
run_context.session,
self._checkpoint_prefix,
global_step=training_util.get_global_step(),
write_state=True,
)
else:
logging.info('ShutdownHook: no Saver defined.')
for fn in self._on_shutdown_hooks:
fn(run_context, self._workers, lame_workers)
class RestartComputation(object):
"""Restart the entire computation.
This hook shuts down all workers and returns control to the top-level by
throwing a CoordinatorShutdownException.
"""
def __init__(self, timeout_ms=10000):
self.timeout_ms = timeout_ms
def __call__(self, run_context, all_workers, lame_workers):
del run_context, lame_workers
all_workers.shutdown(timeout_ms=self.timeout_ms)
logging.info('Terminating coordinator.')
raise CoordinatorShutdownException()
class ShutdownLameWorkers(object):
"""Shutdown lamed workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self, timeout_ms=10000):
self.timeout_in_ms = timeout_ms
def __call__(self, run_context, all_workers, lame_workers):
lame_workers.shutdown(timeout_ms=self.timeout_in_ms)
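# Illustrative sketch (not part of the original module): one way
# GracefulShutdownHook might be wired into a training loop together with the
# RestartComputation shutdown hook above. The master address, checkpoint
# prefix, and train_op are placeholder arguments assumed for the example, and
# the model is assumed to define a global step (required by the hook whenever
# a saver is present).
def _example_train_with_graceful_shutdown(master, checkpoint_prefix, train_op):
  # Imported locally so the sketch stays self-contained; nothing else in this
  # module depends on MonitoredTrainingSession.
  from tensorflow.python.training import monitored_session
  shutdown_hook = GracefulShutdownHook(
      checkpoint_prefix=checkpoint_prefix,
      on_shutdown_hooks=[RestartComputation(timeout_ms=10000)])
  with monitored_session.MonitoredTrainingSession(
      master=master, hooks=[shutdown_hook]) as sess:
    while not sess.should_stop():
      # A CoordinatorShutdownException raised by RestartComputation propagates
      # out of run() and ends this loop.
      sess.run(train_op)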
| apache-2.0 |