repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
zepheira/freemix | freemix/dataset/augment/views.py | 1 | 1026 | from freemix.dataset.transform import RawTransformView, AkaraTransformClient
from freemix.dataset.augment import models
from freemix.dataset.augment import conf
from django.views.generic.base import View
from freemix.views import JSONResponse
class JSONView(View):
template=None
def get_dict(self, *args, **kwargs):
return {}
def get(self, *args, **kwargs):
content = self.get_dict(*args, **kwargs)
return JSONResponse(content, self.template)
class ListPatternJSONView(JSONView):
def get_dict(self, *args, **kwargs):
return models.ListPattern.to_dict()
pattern_jsonp = ListPatternJSONView.as_view(template="freemix/augment/patterns.js")
pattern_json = ListPatternJSONView.as_view()
class AugmentationErrorJSONView(JSONView):
def get_dict(self, *args, **kwargs):
return models.AugmentationErrorCode.to_dict()
error_json = AugmentationErrorJSONView.as_view()
transform = RawTransformView.as_view(transform=AkaraTransformClient(conf.AKARA_AUGMENT_URL))
| apache-2.0 | -6,750,666,305,143,666,000 | 26 | 92 | 0.745614 | false |
apanda/modeling | tests/examples/withProxySat.py | 1 | 1826 | import components
def withProxySat ():
""" A proxy and a firewall. This results in a SAT result, i.e. the packet goes through
    since the proxy can be used to relay information through the firewall."""
ctx = components.Context (['a', 'b', 'c', 'd', 'fw', 'p'],\
['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_f', 'ip_p'])
net = components.Network (ctx)
a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
d = components.EndHost(ctx.d, net, ctx)
fw = components.AclFirewall(ctx.fw, net, ctx)
p = components.WebProxy(ctx.p, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(fw, ctx.ip_f), \
(p, ctx.ip_p)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_f]
net.RoutingTable(a, [(x, fw) for x in addresses])
net.RoutingTable(b, [(x, fw) for x in addresses])
net.RoutingTable(c, [(x, p) for x in addresses])
net.RoutingTable(d, [(x, p) for x in addresses])
net.RoutingTable(fw, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, p), \
(ctx.ip_d, p), \
(ctx.ip_p, p)])
net.RoutingTable(p, [(ctx.ip_a, fw), \
(ctx.ip_b, fw), \
(ctx.ip_c, c), \
(ctx.ip_d, d), \
(ctx.ip_f, fw)])
fw.AddAcls([(ctx.ip_a, ctx.ip_c), (ctx.ip_b, ctx.ip_d)])
net.Attach(a, b, c, d, fw, p)
endhosts = [a, b, c, d]
check = components.PropertyChecker(ctx, net)
return (endhosts, check)
| bsd-3-clause | 3,576,125,686,319,637,500 | 44.65 | 90 | 0.459474 | false |
Tarrasch/luigi | luigi/contrib/external_program.py | 19 | 7177 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2016 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Template tasks for running external programs as luigi tasks.
This module is primarily intended for when you need to call a single external
program or shell script, and it's enough to specify program arguments and
environment variables.
If you need to run multiple commands, chain them together or pipe output
from one command to the next, you're probably better off using something like
`plumbum`_, and wrapping plumbum commands in normal luigi
:py:class:`~luigi.task.Task` s.
.. _plumbum: https://plumbum.readthedocs.io/
"""
import logging
import os
import signal
import subprocess
import sys
import tempfile
import luigi
logger = logging.getLogger('luigi-interface')
class ExternalProgramTask(luigi.Task):
"""
Template task for running an external program in a subprocess
The program is run using :py:class:`subprocess.Popen`, with ``args`` passed
as a list, generated by :py:meth:`program_args` (where the first element should
be the executable). See :py:class:`subprocess.Popen` for details.
    You must override :py:meth:`program_args` to specify the arguments you want,
and you can optionally override :py:meth:`program_environment` if you want to
control the environment variables (see :py:class:`ExternalPythonProgramTask`
for an example).
"""
def program_args(self):
"""
Override this method to map your task parameters to the program arguments
:return: list to pass as ``args`` to :py:class:`subprocess.Popen`
"""
raise NotImplementedError
def program_environment(self):
"""
Override this method to control environment variables for the program
:return: dict mapping environment variable names to values
"""
env = os.environ.copy()
return env
@property
def always_log_stderr(self):
"""
When True, stderr will be logged even if program execution succeeded
Override to False to log stderr only when program execution fails.
"""
return True
def _clean_output_file(self, file_object):
file_object.seek(0)
return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
def run(self):
args = list(map(str, self.program_args()))
logger.info('Running command: %s', ' '.join(args))
tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile()
env = self.program_environment()
proc = subprocess.Popen(
args,
env=env,
stdout=tmp_stdout,
stderr=tmp_stderr
)
try:
with ExternalProgramRunContext(proc):
proc.wait()
success = proc.returncode == 0
stdout = self._clean_output_file(tmp_stdout)
stderr = self._clean_output_file(tmp_stderr)
if stdout:
logger.info('Program stdout:\n{}'.format(stdout))
if stderr:
if self.always_log_stderr or not success:
logger.info('Program stderr:\n{}'.format(stderr))
if not success:
raise ExternalProgramRunError(
'Program failed with return code={}:'.format(proc.returncode),
args, env=env, stdout=stdout, stderr=stderr)
finally:
tmp_stderr.close()
tmp_stdout.close()
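# A minimal usage sketch (not part of the original module): a task that shells
# out to a hypothetical ``gzip`` binary. The parameter and paths are
# illustrative assumptions; see the program_args() docs above.
class ExampleCompressTask(ExternalProgramTask):
    path = luigi.Parameter()
    def program_args(self):
        # First element is the executable, the rest are its arguments
        return ['gzip', '-k', self.path]
    def output(self):
        return luigi.LocalTarget('{}.gz'.format(self.path))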
class ExternalProgramRunContext(object):
def __init__(self, proc):
self.proc = proc
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
def kill_job(self, captured_signal=None, stack_frame=None):
self.proc.kill()
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
class ExternalProgramRunError(RuntimeError):
def __init__(self, message, args, env=None, stdout=None, stderr=None):
super(ExternalProgramRunError, self).__init__(message, args, env, stdout, stderr)
self.message = message
self.args = args
self.env = env
self.out = stdout
self.err = stderr
def __str__(self):
info = self.message
info += '\nCOMMAND: {}'.format(' '.join(self.args))
info += '\nSTDOUT: {}'.format(self.out or '[empty]')
info += '\nSTDERR: {}'.format(self.err or '[empty]')
env_string = None
if self.env:
env_string = ' '.join(['='.join([k, '\'{}\''.format(v)]) for k, v in self.env.items()])
info += '\nENVIRONMENT: {}'.format(env_string or '[empty]')
# reset terminal color in case the ENVIRONMENT changes colors
info += '\033[m'
return info
class ExternalPythonProgramTask(ExternalProgramTask):
"""
Template task for running an external Python program in a subprocess
Simple extension of :py:class:`ExternalProgramTask`, adding two
:py:class:`luigi.parameter.Parameter` s for setting a virtualenv and for
extending the ``PYTHONPATH``.
"""
virtualenv = luigi.Parameter(
default=None,
positional=False,
description='path to the virtualenv directory to use. It should point to '
'the directory containing the ``bin/activate`` file used for '
'enabling the virtualenv.')
extra_pythonpath = luigi.Parameter(
default=None,
positional=False,
description='extend the search path for modules by prepending this '
'value to the ``PYTHONPATH`` environment variable.')
def program_environment(self):
env = super(ExternalPythonProgramTask, self).program_environment()
if self.extra_pythonpath:
pythonpath = ':'.join([self.extra_pythonpath, env.get('PYTHONPATH', '')])
env.update({'PYTHONPATH': pythonpath})
if self.virtualenv:
# Make the same changes to the env that a normal venv/bin/activate script would
path = ':'.join(['{}/bin'.format(self.virtualenv), env.get('PATH', '')])
env.update({
'PATH': path,
'VIRTUAL_ENV': self.virtualenv
})
# remove PYTHONHOME env variable, if it exists
env.pop('PYTHONHOME', None)
return env
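# Hedged usage sketch (not part of luigi): 'python' here resolves through the
# PATH prepared by program_environment() above; the virtualenv/PYTHONPATH
# values in the commented call are illustrative assumptions.
class ExamplePythonScriptTask(ExternalPythonProgramTask):
    def program_args(self):
        return ['python', '-c', 'import sys; print(sys.prefix)']
# e.g. luigi.build([ExamplePythonScriptTask(virtualenv='/opt/venvs/analysis',
#                                           extra_pythonpath='/opt/libs')],
#                  local_scheduler=True)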
| apache-2.0 | 4,729,729,993,393,629,000 | 34.009756 | 99 | 0.630486 | false |
agodbehere/with-venv | travis_pypi_setup.py | 1 | 3758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'agodbehere/with-venv'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
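# Hedged usage sketch (not part of the original script): combine the two
# helpers above to produce the value that update_travis_deploy_password()
# writes into .travis.yml. The default password is an illustrative assumption.
def example_encrypt_for_repo(repo=GITHUB_REPO, password=b'hunter2'):
    """Fetch the Travis public key for ``repo`` and return the base64-encoded
    encrypted ``password``."""
    return encrypt(fetch_public_key(repo), password)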
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| isc | 6,831,854,379,824,786,000 | 29.803279 | 79 | 0.679351 | false |
w1ll1am23/home-assistant | homeassistant/components/fastdotcom/__init__.py | 24 | 1951 | """Support for testing internet speed via Fast.com."""
from datetime import timedelta
import logging
from fastdotcom import fast_com
import voluptuous as vol
from homeassistant.const import CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
DOMAIN = "fastdotcom"
DATA_UPDATED = f"{DOMAIN}_data_updated"
_LOGGER = logging.getLogger(__name__)
CONF_MANUAL = "manual"
DEFAULT_INTERVAL = timedelta(hours=1)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_MANUAL, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
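# Hedged configuration sketch (not part of this module): a configuration.yaml
# entry exercising the schema above; the values are illustrative assumptions.
#
#   fastdotcom:
#     manual: false
#     scan_interval:
#       hours: 2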
async def async_setup(hass, config):
"""Set up the Fast.com component."""
conf = config[DOMAIN]
data = hass.data[DOMAIN] = SpeedtestData(hass)
if not conf[CONF_MANUAL]:
async_track_time_interval(hass, data.update, conf[CONF_SCAN_INTERVAL])
def update(call=None):
"""Service call to manually update the data."""
data.update()
hass.services.async_register(DOMAIN, "speedtest", update)
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config))
return True
class SpeedtestData:
"""Get the latest data from fast.com."""
def __init__(self, hass):
"""Initialize the data object."""
self.data = None
self._hass = hass
def update(self, now=None):
"""Get the latest data from fast.com."""
_LOGGER.debug("Executing fast.com speedtest")
self.data = {"download": fast_com()}
dispatcher_send(self._hass, DATA_UPDATED)
| apache-2.0 | 4,986,699,854,218,640,000 | 26.871429 | 84 | 0.657611 | false |
torchingloom/edx-platform | cms/envs/test.py | 1 | 7368 | """
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
import os
from path import path
from warnings import filterwarnings
# import settings from LMS for consistent behavior with CMS
from lms.envs.test import (WIKI_ENABLED)
# Nose Test Runner
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
_system = 'cms'
_report_dir = REPO_ROOT / 'reports' / _system
_report_dir.makedirs_p()
NOSE_ARGS = [
'--id-file', REPO_ROOT / '.testids' / _system / 'noseids',
'--xunit-file', _report_dir / 'nosetests.xml',
]
TEST_ROOT = path('test_root')
# Want static files in the same dir for running on jenkins.
STATIC_ROOT = TEST_ROOT / "staticfiles"
GITHUB_REPO_ROOT = TEST_ROOT / "data"
COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data"
# For testing "push to lms"
FEATURES['ENABLE_EXPORT_GIT'] = True
GIT_REPO_EXPORT_DIR = TEST_ROOT / "export_course_repos"
# Makes the tests run much faster...
SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead
# TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
STATICFILES_DIRS += [
(course_dir, COMMON_TEST_DATA_ROOT / course_dir)
for course_dir in os.listdir(COMMON_TEST_DATA_ROOT)
if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir)
]
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'test_xmodule',
'collection': 'test_modulestore',
}
MODULESTORE_OPTIONS = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': TEST_ROOT / "data",
'render_template': 'edxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': MODULESTORE_OPTIONS
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': MODULESTORE_OPTIONS
},
'draft': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': MODULESTORE_OPTIONS
},
'split': {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': MODULESTORE_OPTIONS
}
}
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'test_xcontent',
'collection': 'dont_trip',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "cms.db",
},
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "preview"
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
}
# Add external_auth to Installed apps for testing
INSTALLED_APPS += ('external_auth', )
# hide ratelimit warnings while running tests
filterwarnings('ignore', message='No request passed to the backend, unable to rate-limit')
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'cache'
BROKER_TRANSPORT = 'memory'
########################### Server Ports ###################################
# These ports are carefully chosen so that if the browser needs to
# access them, they will be available through the SauceLabs SSH tunnel
LETTUCE_SERVER_PORT = 8003
XQUEUE_PORT = 8040
YOUTUBE_PORT = 8031
LTI_PORT = 8765
################### Make tests faster
# http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
)
# dummy segment-io key
SEGMENT_IO_KEY = '***REMOVED***'
# disable NPS survey in test mode
FEATURES['STUDIO_NPS_SURVEY'] = False
FEATURES['ENABLE_SERVICE_STATUS'] = True
# This is to disable a test under the common directory that will not pass when run under CMS
FEATURES['DISABLE_RESET_EMAIL_TEST'] = True
# Toggles embargo on for testing
FEATURES['EMBARGO'] = True
# set up some testing for microsites
MICROSITE_CONFIGURATION = {
"test_microsite": {
"domain_prefix": "testmicrosite",
"university": "test_microsite",
"platform_name": "Test Microsite",
"logo_image_url": "test_microsite/images/header-logo.png",
"email_from_address": "[email protected]",
"payment_support_email": "[email protected]",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "test_microsite.localhost",
"course_org_filter": "TestMicrositeX",
"course_about_show_social_links": False,
"css_overrides_file": "test_microsite/css/test_microsite.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "This is a Test Microsite Overlay Text.",
"course_index_overlay_logo_file": "test_microsite/images/header-logo.png",
"homepage_overlay_html": "<h1>This is a Test Microsite Overlay HTML</h1>"
},
"default": {
"university": "default_university",
"domain_prefix": "www",
}
}
MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_microsites'
FEATURES['USE_MICROSITES'] = True
# For consistency in user-experience, keep the value of this setting in sync with
# the one in lms/envs/test.py
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
| agpl-3.0 | 1,389,359,036,859,832,300 | 30.896104 | 144 | 0.649701 | false |
bilgili/nest-simulator | topology/examples/grid_iaf.py | 13 | 1498 | # -*- coding: utf-8 -*-
#
# grid_iaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create layer of 4x3 iaf_neurons, visualize
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import pylab
import nest.topology as topo
pylab.ion()
nest.ResetKernel()
l1 = topo.CreateLayer({'columns': 4, 'rows': 3,
'extent': [2.0, 1.5],
'elements': 'iaf_neuron'})
nest.PrintNetwork()
nest.PrintNetwork(2)
nest.PrintNetwork(2, l1)
topo.PlotLayer(l1, nodesize=50)
# beautify
pylab.axis([-1.0, 1.0, -0.75, 0.75])
pylab.axes().set_aspect('equal', 'box')
pylab.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
pylab.axes().set_yticks((-0.5, 0, 0.5))
pylab.grid(True)
pylab.xlabel('4 Columns, Extent: 1.5')
pylab.ylabel('2 Rows, Extent: 1.0')
# pylab.savefig('grid_iaf.png')
| gpl-2.0 | 5,879,585,912,008,757,000 | 24.827586 | 70 | 0.686248 | false |
hanlind/nova | nova/api/openstack/compute/keypairs.py | 2 | 13850 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import keypairs
from nova.api.openstack.compute.views import keypairs as keypairs_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _
from nova.objects import keypair as keypair_obj
from nova.policies import keypairs as kp_policies
ALIAS = 'os-keypairs'
class KeypairController(wsgi.Controller):
"""Keypair API controller for the OpenStack API."""
_view_builder_class = keypairs_view.ViewBuilder
def __init__(self):
self.api = compute_api.KeypairAPI()
super(KeypairController, self).__init__()
def _filter_keypair(self, keypair, **attrs):
# TODO(claudiub): After v2 and v2.1 is no longer supported,
# keypair.type can be added to the clean dict below
clean = {
'name': keypair.name,
'public_key': keypair.public_key,
'fingerprint': keypair.fingerprint,
}
for attr in attrs:
clean[attr] = keypair[attr]
return clean
@wsgi.Controller.api_version("2.10")
@wsgi.response(201)
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v210)
def create(self, req, body):
"""Create or import keypair.
A policy check restricts users from creating keys for other users
params: keypair object with:
name (required) - string
public_key (optional) - string
type (optional) - string
user_id (optional) - string
"""
# handle optional user-id for admin only
user_id = body['keypair'].get('user_id')
return self._create(req, body, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@wsgi.response(201)
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v22)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
Keypair will have the type ssh or x509, specified by type.
You can send a public_key to add an existing ssh/x509 key.
params: keypair object with:
name (required) - string
public_key (optional) - string
type (optional) - string
"""
return self._create(req, body, type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@extensions.expected_errors((400, 403, 409))
@validation.schema(keypairs.create_v20, "2.0", "2.0")
@validation.schema(keypairs.create, "2.1", "2.1")
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key.
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
return self._create(req, body)
def _create(self, req, body, user_id=None, **keypair_filters):
context = req.environ['nova.context']
params = body['keypair']
name = common.normalize_name(params['name'])
key_type = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH)
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'create',
target={'user_id': user_id,
'project_id': context.project_id})
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(context,
user_id, name,
params['public_key'], key_type)
keypair = self._filter_keypair(keypair, user_id=True,
**keypair_filters)
else:
keypair, private_key = self.api.create_key_pair(
context, user_id, name, key_type)
keypair = self._filter_keypair(keypair, user_id=True,
**keypair_filters)
keypair['private_key'] = private_key
return {'keypair': keypair}
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
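    # Hedged request sketch (not part of the original controller): a JSON body
    # for the 2.10+ create call above, importing an existing key on behalf of
    # another user (the key material is an illustrative placeholder):
    # {"keypair": {"name": "ops-key", "user_id": "other-user",
    #              "public_key": "ssh-rsa AAAA... user@host"}}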
@wsgi.Controller.api_version("2.1", "2.1")
@validation.query_schema(keypairs.delete_query_schema_v20)
@wsgi.response(202)
@extensions.expected_errors(404)
def delete(self, req, id):
self._delete(req, id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@validation.query_schema(keypairs.delete_query_schema_v20)
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
self._delete(req, id)
@wsgi.Controller.api_version("2.10") # noqa
@validation.query_schema(keypairs.delete_query_schema_v210)
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
self._delete(req, id, user_id=user_id)
def _delete(self, req, id, user_id=None):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
# handle optional user-id for admin only
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'delete',
target={'user_id': user_id,
'project_id': context.project_id})
try:
self.api.delete_key_pair(context, user_id, id)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
def _get_user_id(self, req):
if 'user_id' in req.GET.keys():
user_id = req.GET.getall('user_id')[0]
return user_id
@wsgi.Controller.api_version("2.10")
@validation.query_schema(keypairs.show_query_schema_v210)
@extensions.expected_errors(404)
def show(self, req, id):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
return self._show(req, id, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@validation.query_schema(keypairs.show_query_schema_v20)
@extensions.expected_errors(404)
def show(self, req, id):
return self._show(req, id, type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@validation.query_schema(keypairs.show_query_schema_v20)
@extensions.expected_errors(404)
def show(self, req, id):
return self._show(req, id)
def _show(self, req, id, user_id=None, **keypair_filters):
"""Return data for the given key name."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'show',
target={'user_id': user_id,
'project_id': context.project_id})
try:
# The return object needs to be a dict in order to pop the 'type'
# field, if the api_version < 2.2.
keypair = self.api.get_key_pair(context, user_id, id)
keypair = self._filter_keypair(keypair, created_at=True,
deleted=True, deleted_at=True,
id=True, user_id=True,
updated_at=True, **keypair_filters)
except exception.KeypairNotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
# TODO(oomichi): It is necessary to filter a response of keypair with
# _filter_keypair() when v2.1+microversions for implementing consistent
# behaviors in this keypair resource.
return {'keypair': keypair}
@wsgi.Controller.api_version("2.35")
@validation.query_schema(keypairs.index_query_schema_v235)
@extensions.expected_errors(400)
def index(self, req):
user_id = self._get_user_id(req)
return self._index(req, links=True, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.10", "2.34") # noqa
@validation.query_schema(keypairs.index_query_schema_v210)
@extensions.expected_errors(())
def index(self, req):
# handle optional user-id for admin only
user_id = self._get_user_id(req)
return self._index(req, type=True, user_id=user_id)
@wsgi.Controller.api_version("2.2", "2.9") # noqa
@validation.query_schema(keypairs.index_query_schema_v20)
@extensions.expected_errors(())
def index(self, req):
return self._index(req, type=True)
@wsgi.Controller.api_version("2.1", "2.1") # noqa
@validation.query_schema(keypairs.index_query_schema_v20)
@extensions.expected_errors(())
def index(self, req):
return self._index(req)
def _index(self, req, user_id=None, links=False, **keypair_filters):
"""List of keypairs for a user."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'index',
target={'user_id': user_id,
'project_id': context.project_id})
if api_version_request.is_supported(req, min_version='2.35'):
limit, marker = common.get_limit_and_marker(req)
else:
limit = marker = None
try:
key_pairs = self.api.get_key_pairs(
context, user_id, limit=limit, marker=marker)
except exception.MarkerNotFound as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
key_pairs = [self._filter_keypair(key_pair, **keypair_filters)
for key_pair in key_pairs]
keypairs_list = [{'keypair': key_pair} for key_pair in key_pairs]
keypairs_dict = {'keypairs': keypairs_list}
if links:
keypairs_links = self._view_builder.get_links(req, key_pairs)
if keypairs_links:
keypairs_dict['keypairs_links'] = keypairs_links
return keypairs_dict
class Controller(wsgi.Controller):
def _add_key_name(self, req, servers):
for server in servers:
db_server = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show'/'detail' methods.
server['key_name'] = db_server['key_name']
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
server = resp_obj.obj['server']
self._add_key_name(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if context.can(kp_policies.BASE_POLICY_NAME, fatal=False):
self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if 'servers' in resp_obj.obj and context.can(
kp_policies.BASE_POLICY_NAME, fatal=False):
servers = resp_obj.obj['servers']
self._add_key_name(req, servers)
class Keypairs(extensions.V21APIExtensionBase):
"""Keypair Support."""
name = "Keypairs"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS,
KeypairController())]
return resources
def get_controller_extensions(self):
controller = Controller()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
# NOTE(alex_xu): The v2.1 API compat mode, we strip the spaces for
# keypair create. But we didn't strip spaces at here for
# backward-compatible some users already created keypair and name with
# leading/trailing spaces by legacy v2 API.
create_kwargs['key_name'] = server_dict.get('key_name')
def get_server_create_schema(self, version):
if version == '2.0':
return keypairs.server_create_v20
else:
return keypairs.server_create
| apache-2.0 | -6,605,378,922,066,640,000 | 37.579387 | 79 | 0.609964 | false |
turbomanage/training-data-analyst | courses/developingapps/v1.2/python/kubernetesengine/bonus/backend/quiz/gcp/pubsub.py | 12 | 1336 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
project_id = os.getenv('GCLOUD_PROJECT')
# TODO: Load the Cloud Pub/Sub module
from google.cloud import pubsub_v1
# END TODO
# TODO: Create a Pub/Sub Subscriber Client
sub_client = pubsub_v1.SubscriberClient()
# END TODO
# TODO: Create a Subscription object named worker-subscription
sub_path = sub_client.subscription_path(project_id, 'worker-subscription')
# END TODO
"""pull_feedback
Starts pulling messages from subscription
- receive callback function from calling module
- initiate the pull providing the callback function
"""
def pull_feedback(callback):
# TODO: Subscriber to the worker-subscription,
# invoking the callback
sub_client.subscribe(sub_path, callback=callback)
# END TODO
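def example_callback(message):
    """Hedged example (not part of the original module): log and acknowledge a
    pulled feedback message."""
    logging.info('Received feedback: %s', message.data)
    message.ack()
# e.g. pull_feedback(example_callback)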
| apache-2.0 | -2,091,258,551,746,689,800 | 26.265306 | 74 | 0.758234 | false |
kfwang/Glance-OVA-OVF | glance/tests/functional/test_sqlite.py | 13 | 1348 | # Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test cases for sqlite-specific logic"""
from glance.tests import functional
from glance.tests.utils import depends_on_exe
from glance.tests.utils import execute
from glance.tests.utils import skip_if_disabled
class TestSqlite(functional.FunctionalTest):
"""Functional tests for sqlite-specific logic"""
@depends_on_exe('sqlite3')
@skip_if_disabled
def test_big_int_mapping(self):
"""Ensure BigInteger not mapped to BIGINT"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
cmd = "sqlite3 tests.sqlite '.schema'"
exitcode, out, err = execute(cmd, raise_error=True)
self.assertNotIn('BIGINT', out)
self.stop_servers()
| apache-2.0 | -7,760,043,087,928,661,000 | 32.7 | 78 | 0.702522 | false |
adrianmugnoz/Documentacion-Divulgame | readthedocs/builds/migrations/0010_add_largest_alias.py | 16 | 9540 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'VersionAlias.largest'
db.add_column('builds_versionalias', 'largest', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'VersionAlias.largest'
db.delete_column('builds_versionalias', 'largest')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'builds.build': {
'Meta': {'ordering': "['-date']", 'object_name': 'Build'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['projects.Project']"}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'null': 'True', 'to': "orm['builds.Version']"})
},
'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug'), ('project', 'identifier')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'builds.versionalias': {
'Meta': {'object_name': 'VersionAlias'},
'from_slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'largest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['projects.Project']"}),
'to_slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['builds']
| mit | -9,178,276,200,531,165,000 | 74.11811 | 182 | 0.548847 | false |
sanyaade-teachings/fpys2 | setup.py | 1 | 1399 | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(name = "FPyS2",
version = "0.6.2",
description = "Amazon FPS Library (version 2008-09-17)",
author = "Travis Fischer",
author_email = "[email protected]",
url = "http://github.com/travisfischer/fpys2",
packages = ["fpys2"],
test_suite = "fpys2.tests",
install_requires = ["wsgi_intercept",
],
entry_points = {},
license = 'MIT License',
long_description = """\
FPyS is a library for interacting with the Amazon Flexible Payment Service.
FPyS communicates with the service via the available REST interface. It handles the
details of request signing and response parsing for the application developer.
An Amazon web services account is required to begin working with FPyS in the development
environment. An approved account is required to move software into production.
Development Trac: http://fpys.achievewith.us
Mailing List: http://groups.google.com/group/fpys/
""",
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| mit | 3,575,750,244,426,801,700 | 37.861111 | 88 | 0.661901 | false |
aruneral01/auto-sklearn | test/data/test_split_data.py | 5 | 3603 | '''
Created on Dec 16, 2014
@author: Aaron Klein
'''
import unittest
import numpy as np
from autosklearn.data.split_data import split_data
class Test(unittest.TestCase):
def test_split_data_regression(self):
n_points = 1000
np.random.seed(42)
n_dims = np.random.randint(1, 100)
X = np.random.rand(n_points, n_dims)
y = np.random.rand(n_points)
X_train, X_valid, Y_train, Y_valid = split_data(X, y)
self.assertEqual(X_train.shape[0], 670)
self.assertEqual(X_valid.shape[0], 330)
self.assertEqual(Y_train.shape[0], 670)
self.assertEqual(Y_valid.shape[0], 330)
self.assertEqual(X_train.shape[1], n_dims)
self.assertEqual(X_valid.shape[1], n_dims)
# Random checks
self.assertAlmostEqual(X_train[4, 2], 0.5986584841970366)
self.assertAlmostEqual(X_valid[4, 2], 0.63911512838980322)
def test_split_not_same_shape(self):
X = np.array([[3, 4], [1, 2], [3, 4]])
y = np.array([0, 0, 1, 1])
self.assertRaises(ValueError, split_data, X, y)
def test_stratify(self):
for i in range(5):
self._split_regular()
self._split_regular_classification()
self._stratify()
def _split_regular(self):
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
y = np.array([0, 0, 0, 1, 1, 2])
X_train, X_valid, Y_train, Y_valid = split_data(X, y)
# Check shapes
self.assertEqual(X_train.shape, (4, 2))
self.assertEqual(Y_train.shape, (4, ))
self.assertEqual(X_valid.shape, (2, 2))
self.assertEqual(Y_valid.shape, (2, ))
self.assertListEqual(list(Y_valid), [0, 0])
self.assertListEqual(list(Y_train), [2, 0, 1, 1])
def _split_regular_classification(self):
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
y = np.array([0, 0, 2, 1, 1, 0])
X_train, X_valid, Y_train, Y_valid = split_data(X, y,
classification=True)
# Check shapes
self.assertEqual(X_train.shape, (4, 2))
self.assertEqual(Y_train.shape, (4, ))
self.assertEqual(X_valid.shape, (2, 2))
self.assertEqual(Y_valid.shape, (2, ))
self.assertListEqual(list(Y_valid), [0, 1])
self.assertListEqual(list(Y_train), [0, 0, 1, 2])
def _stratify(self):
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
y = np.array([0, 0, 0, 0, 1, 1])
X_train, X_valid, Y_train, Y_valid = split_data(X, y)
# Check shapes
self.assertEqual(X_train.shape[0], 4)
self.assertEqual(X_train.shape[1], 2)
self.assertEqual(Y_train.shape[0], 4)
self.assertEqual(X_valid.shape[0], 2)
self.assertEqual(X_valid.shape[1], 2)
self.assertEqual(Y_valid.shape[0], 2)
self.assertListEqual(list(Y_valid), [1, 0])
self.assertListEqual(list(Y_train), [0, 0, 0, 1])
def test_split_classification_many_imbalanced_classes(self):
for i in range(10):
X = np.array([range(20), range(20)]).transpose()
y = np.array((0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
4, 5))
np.random.shuffle(y)
X_train, X_valid, Y_train, Y_valid = split_data(X, y,
classification=True)
print X_train, Y_train
self.assertLessEqual(max(Y_valid), 1)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 6,755,360,988,475,327,000 | 33.980583 | 80 | 0.533999 | false |
SUNET/eduid-webapp | src/eduid_webapp/personal_data/app.py | 1 | 2998 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# Copyright (c) 2019,2020 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from typing import Any, Mapping, Optional, cast
from flask import current_app
from eduid_common.api.am import AmRelay
from eduid_common.authn.middleware import AuthnBaseApp
from eduid_common.config.parsers import load_config
from eduid_userdb.personal_data import PersonalDataUserDB
from eduid_webapp.personal_data.settings import PersonalDataConfig
class PersonalDataApp(AuthnBaseApp):
def __init__(self, config: PersonalDataConfig, **kwargs):
super().__init__(config, **kwargs)
self.conf = config
# Init celery
self.am_relay = AmRelay(config)
self.private_userdb = PersonalDataUserDB(config.mongo_uri)
current_pdata_app: PersonalDataApp = cast(PersonalDataApp, current_app)
def pd_init_app(name: str = 'personal_data', test_config: Optional[Mapping[str, Any]] = None) -> PersonalDataApp:
"""
Create an instance of an eduid personal data app.
:param name: The name of the instance, it will affect the configuration loaded.
:param test_config: Override config, used in test cases.
"""
config = load_config(typ=PersonalDataConfig, app_name=name, ns='webapp', test_config=test_config)
app = PersonalDataApp(config)
app.logger.info(f'Init {app}...')
from eduid_webapp.personal_data.views import pd_views
app.register_blueprint(pd_views)
return app
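# Hedged usage sketch (not part of the original module): create an app for
# local experimentation with an override config; the key and URI shown are
# assumptions about the PersonalDataConfig fields used above.
# app = pd_init_app(test_config={'mongo_uri': 'mongodb://localhost/eduid_pd'})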
| bsd-3-clause | 6,407,471,300,470,641,000 | 37.435897 | 113 | 0.738826 | false |
robovero/python | robovero/lpc17xx_qei.py | 2 | 16790 | """Quadrature encoder interface client library functions. Find implementation
details in LPC17xx CMSIS-Compliant Standard Peripheral Firmware Driver Library
documentation."""
from internals import robocaller, cstruct
__author__ = "Neil MacMunn"
__credits__ = ["Neil MacMunn", "NXP MCU SW Application Team"]
__maintainer__ = "Neil MacMunn"
__email__ = "[email protected]"
__copyright__ = "Copyright 2011, Gumstix Inc"
__license__ = "BSD 2-Clause"
__version__ = "0.1"
# QEI Reset types
# Reset position counter
QEI_RESET_POS = (1)
# Reset Position Counter on Index
QEI_RESET_POSOnIDX = (2)
# Reset Velocity
QEI_RESET_VEL = (4)
# Reset Index Counter
QEI_RESET_IDX = (8)
# QEI Direction Invert Type Option
# Direction is not inverted
QEI_DIRINV_NONE = (0)
# Direction is complemented
QEI_DIRINV_CMPL = (1)
# QEI Signal Mode Option
# Signal operation: Quadrature phase mode
QEI_SIGNALMODE_QUAD = (0)
# Signal operation: Clock/Direction mode
QEI_SIGNALMODE_CLKDIR = (1)
# QEI Capture Mode Option
# Capture mode: Only Phase-A edges are counted (2X)
QEI_CAPMODE_2X = (0)
# Capture mode: BOTH PhA and PhB edges are counted (4X)
QEI_CAPMODE_4X = (1)
# QEI Invert Index Signal Option
# Invert Index signal option: None
QEI_INVINX_NONE = (0)
# Invert Index signal option: Enable
QEI_INVINX_EN = (1)
# QEI timer reload option
# Reload value in absolute value
QEI_TIMERRELOAD_TICKVAL = (0)
# Reload value in microsecond value
QEI_TIMERRELOAD_USVAL = (1)
# QEI Flag Status type
# Direction status
QEI_STATUS_DIR = (1<<0)
# QEI Compare Position channel option
# QEI compare position channel 0
QEI_COMPPOS_CH_0 = (0)
# QEI compare position channel 1
QEI_COMPPOS_CH_1 = (1)
# QEI compare position channel 2
QEI_COMPPOS_CH_2 = (2)
# QEI interrupt flag type
# index pulse was detected interrupt
QEI_INTFLAG_INX_Int = (1<<0)
# Velocity timer over flow interrupt
QEI_INTFLAG_TIM_Int = (1<<1)
# Capture velocity is less than compare interrupt
QEI_INTFLAG_VELC_Int = (1<<2)
# Change of direction interrupt
QEI_INTFLAG_DIR_Int = (1<<3)
# An encoder phase error interrupt
QEI_INTFLAG_ERR_Int = (1<<4)
# An encoder clock pulse was detected interrupt
QEI_INTFLAG_ENCLK_Int = (1<<5)
# position 0 compare value is equal to the current position interrupt
QEI_INTFLAG_POS0_Int = (1<<6)
# position 1 compare value is equal to the current position interrupt
QEI_INTFLAG_POS1_Int = (1<<7)
# position 2 compare value is equal to the current position interrupt
QEI_INTFLAG_POS2_Int = (1<<8)
# Index compare value is equal to the current index count interrupt
QEI_INTFLAG_REV_Int = (1<<9)
# Combined position 0 and revolution count interrupt
QEI_INTFLAG_POS0REV_Int = (1<<10)
# Combined position 1 and revolution count interrupt
QEI_INTFLAG_POS1REV_Int = (1<<11)
# Combined position 2 and revolution count interrupt
QEI_INTFLAG_POS2REV_Int = (1<<12)
class QEI_CFG_Type(cstruct):
'''QEI Configuration structure type definition.
DirectionInvert: 1-bit Direction invert option:
- QEI_DIRINV_NONE: QEI Direction is normal
- QEI_DIRINV_CMPL: QEI Direction is complemented
SignalMode: 1-bit Signal mode Option:
- QEI_SIGNALMODE_QUAD: Signal is in Quadrature phase mode
- QEI_SIGNALMODE_CLKDIR: Signal is in Clock/Direction mode
CaptureMode: 1-bit Capture Mode Option:
- QEI_CAPMODE_2X: Only Phase-A edges are counted (2X)
- QEI_CAPMODE_4X: BOTH Phase-A and Phase-B edges are counted (4X)
InvertIndex: 1-bit Invert Index Option:
- QEI_INVINX_NONE: the sense of the index input is normal
- QEI_INVINX_EN: inverts the sense of the index input
ptr: LPC1769 memory address where structure is stored. Use this in place of
the C reference operator (&).
'''
pass
class QEI_RELOADCFG_Type(cstruct):
'''Timer Reload Configuration structure type definition.
ReloadOption: Velocity Timer Reload Option, should be:
- QEI_TIMERRELOAD_TICKVAL: Reload value in absolute value
- QEI_TIMERRELOAD_USVAL: Reload value in microsecond value
ReloadValue: Velocity Timer Reload Value, 32-bit long, should be matched
with Velocity Timer Reload Option
ptr: LPC1769 memory address where structure is stored. Use this in place of
the C reference operator (&).
'''
pass
def QEI_GetTimer(QEIx):
'''Get current timer counter in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
return: Current timer counter in QEI peripheral
'''
return robocaller("QEI_GetTimer", "uint32_t", QEIx)
def QEI_DeInit(QEIx):
'''De-initializes the QEI peripheral registers to their default reset values.
QEIx: QEI peripheral, should be LPC_QEI
'''
return robocaller("QEI_DeInit", "void", QEIx)
def QEI_GetPosition(QEIx):
'''Get current position value in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
return: Current position value of QEI peripheral
'''
return robocaller("QEI_GetPosition", "uint32_t", QEIx)
def QEI_GetStatus(QEIx, ulFlagType):
  '''Check whether the specified status flag is set.
QEIx: QEI peripheral, should be LPC_QEI
ulFlagType: Status Flag Type, should be one of the following:
- QEI_STATUS_DIR: Direction Status
return: New Status of this status flag (SET or RESET)
'''
return robocaller("QEI_GetStatus", "FlagStatus", QEIx, ulFlagType)
def QEI_Reset(QEIx, ulResetType):
'''Resets value for each type of QEI value, such as velocity, counter,
position, etc.
QEIx: QEI peripheral, should be LPC_QEI
ulResetType: QEI Reset Type, should be one of the following:
- QEI_RESET_POS: Reset Position Counter
- QEI_RESET_POSOnIDX: Reset Position Counter on Index signal
- QEI_RESET_VEL: Reset Velocity
- QEI_RESET_IDX: Reset Index Counter
'''
return robocaller("QEI_Reset", "void", QEIx, ulResetType)
def QEI_SetMaxPosition(QEIx, ulMaxPos):
'''Set max position value for QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulMaxPos: Max position value to set
'''
return robocaller("QEI_SetMaxPosition", "void", QEIx, ulMaxPos)
def QEI_GetVelocity(QEIx):
'''Get current velocity pulse counter in current time period.
QEIx: QEI peripheral, should be LPC_QEI
return: Current velocity pulse counter value
'''
return robocaller("QEI_GetVelocity", "uint32_t", QEIx)
def QEI_GetVelocityCap(QEIx):
  '''Get the most recently measured velocity of the QEI. When the velocity timer
  in the QEI overflows, the current velocity value is loaded into the Velocity
  Capture register.
QEIx: QEI peripheral, should be LPC_QEI
return: The most recently measured velocity value
'''
return robocaller("QEI_GetVelocityCap", "uint32_t", QEIx)
def QEI_SetPositionComp(QEIx, bPosCompCh, ulPosComp):
'''Set position compare value for QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
bPosCompCh: Compare Position channel, should be:
- QEI_COMPPOS_CH_0: QEI compare position channel 0
- QEI_COMPPOS_CH_1: QEI compare position channel 1
- QEI_COMPPOS_CH_2: QEI compare position channel 2
ulPosComp: Compare Position value to set
'''
return robocaller("QEI_SetPositionComp", "void", QEIx, bPosCompCh, ulPosComp)
def QEI_SetDigiFilter(QEIx, ulSamplingPulse):
'''Set value of sampling count for the digital filter in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulSamplingPulse: Value of sampling count to set
'''
return robocaller("QEI_SetDigiFilter", "void", QEIx, ulSamplingPulse)
def QEI_IntSet(QEIx, ulIntType):
'''Sets (forces) specified interrupt in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulIntType: Interrupt Flag Status type, should be:
- QEI_INTFLAG_INX_Int: index pulse was detected interrupt
- QEI_INTFLAG_TIM_Int: Velocity timer over flow interrupt
- QEI_INTFLAG_VELC_Int: Capture velocity is less than compare
interrupt
- QEI_INTFLAG_DIR_Int: Change of direction interrupt
- QEI_INTFLAG_ERR_Int: An encoder phase error interrupt
- QEI_INTFLAG_ENCLK_Int: An encoder clock pulse was detected
interrupt
- QEI_INTFLAG_POS0_Int: position 0 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS1_Int: position 1 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS2_Int: position 2 compare value is equal to the
current position interrupt
- QEI_INTFLAG_REV_Int: Index compare value is equal to the current
index count interrupt
- QEI_INTFLAG_POS0REV_Int: Combined position 0 and revolution
count interrupt
- QEI_INTFLAG_POS1REV_Int: Combined position 1 and revolution
count interrupt
- QEI_INTFLAG_POS2REV_Int: Combined position 2 and revolution
count interrupt
'''
return robocaller("QEI_IntSet", "void", QEIx, ulIntType)
def QEI_GetIndex(QEIx):
'''Get current index counter of QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
return: Current value of QEI index counter
'''
return robocaller("QEI_GetIndex", "uint32_t", QEIx)
def QEI_SetTimerReload(QEIx, QEIReloadStruct):
  '''Set the timer reload value for the QEI peripheral. When the velocity timer
  overflows, the value set in the Timer Reload register is loaded into the
  velocity timer for the next period. The calculated velocity in RPM is
  therefore affected by this value.
QEIx: QEI peripheral, should be LPC_QEI
QEIReloadStruct: QEI reload structure
'''
return robocaller("QEI_SetTimerReload", "void", QEIx, QEIReloadStruct)
def QEI_ConfigStructInit(QIE_InitStruct):
'''Fills each QIE_InitStruct member with its default value.
- DirectionInvert = QEI_DIRINV_NONE
- SignalMode = QEI_SIGNALMODE_QUAD
- CaptureMode = QEI_CAPMODE_4X
- InvertIndex = QEI_INVINX_NONE.
QIE_InitStruct: Pointer to a QEI_CFG_Type structure which will be
initialized.
'''
return robocaller("QEI_ConfigStructInit", "void", QIE_InitStruct)
def QEI_SetVelocityComp(QEIx, ulVelComp):
'''Set Velocity Compare value for QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulVelComp: Compare Velocity value to set
'''
return robocaller("QEI_SetVelocityComp", "void", QEIx, ulVelComp)
def QEI_Init(QEIx, QEI_ConfigStruct):
'''Initializes the QEI peripheral according to the specified parameters in the
QEI_ConfigStruct.
QEIx: QEI peripheral, should be LPC_QEI
QEI_ConfigStruct: Pointer to a QEI_CFG_Type structure that contains the
configuration information for the specified QEI peripheral
'''
return robocaller("QEI_Init", "void", QEIx, QEI_ConfigStruct)
def QEI_IntCmd(QEIx, ulIntType, NewState):
'''Enable/Disable specified interrupt in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulIntType: Interrupt Flag Status type, should be:
- QEI_INTFLAG_INX_Int: index pulse was detected interrupt
- QEI_INTFLAG_TIM_Int: Velocity timer over flow interrupt
- QEI_INTFLAG_VELC_Int: Capture velocity is less than compare
interrupt
- QEI_INTFLAG_DIR_Int: Change of direction interrupt
- QEI_INTFLAG_ERR_Int: An encoder phase error interrupt
- QEI_INTFLAG_ENCLK_Int: An encoder clock pulse was detected
interrupt
- QEI_INTFLAG_POS0_Int: position 0 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS1_Int: position 1 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS2_Int: position 2 compare value is equal to the
current position interrupt
- QEI_INTFLAG_REV_Int: Index compare value is equal to the current
index count interrupt
- QEI_INTFLAG_POS0REV_Int: Combined position 0 and revolution
count interrupt
- QEI_INTFLAG_POS1REV_Int: Combined position 1 and revolution
count interrupt
- QEI_INTFLAG_POS2REV_Int: Combined position 2 and revolution
count interrupt
NewState: New function state, should be:
- DISABLE
- ENABLE
'''
return robocaller("QEI_IntCmd", "void", QEIx, ulIntType, NewState)
def QEI_IntClear(QEIx, ulIntType):
'''Clear (force) specified interrupt (pending) in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulIntType: Interrupt Flag Status type, should be:
- QEI_INTFLAG_INX_Int: index pulse was detected interrupt
- QEI_INTFLAG_TIM_Int: Velocity timer over flow interrupt
- QEI_INTFLAG_VELC_Int: Capture velocity is less than compare
interrupt
- QEI_INTFLAG_DIR_Int: Change of direction interrupt
- QEI_INTFLAG_ERR_Int: An encoder phase error interrupt
- QEI_INTFLAG_ENCLK_Int: An encoder clock pulse was detected
interrupt
- QEI_INTFLAG_POS0_Int: position 0 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS1_Int: position 1 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS2_Int: position 2 compare value is equal to the
current position interrupt
- QEI_INTFLAG_REV_Int: Index compare value is equal to the current
index count interrupt
- QEI_INTFLAG_POS0REV_Int: Combined position 0 and revolution
count interrupt
- QEI_INTFLAG_POS1REV_Int: Combined position 1 and revolution
count interrupt
- QEI_INTFLAG_POS2REV_Int: Combined position 2 and revolution
count interrupt
'''
return robocaller("QEI_IntClear", "void", QEIx, ulIntType)
def QEI_GetIntStatus(QEIx, ulIntType):
  '''Check whether the specified interrupt flag status in the QEI peripheral
  is set.
QEIx: QEI peripheral, should be LPC_QEI
ulIntType: Interrupt Flag Status type, should be:
- QEI_INTFLAG_INX_Int: index pulse was detected interrupt
- QEI_INTFLAG_TIM_Int: Velocity timer over flow interrupt
- QEI_INTFLAG_VELC_Int: Capture velocity is less than compare
interrupt
- QEI_INTFLAG_DIR_Int: Change of direction interrupt
- QEI_INTFLAG_ERR_Int: An encoder phase error interrupt
- QEI_INTFLAG_ENCLK_Int: An encoder clock pulse was detected
interrupt
- QEI_INTFLAG_POS0_Int: position 0 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS1_Int: position 1 compare value is equal to the
current position interrupt
- QEI_INTFLAG_POS2_Int: position 2 compare value is equal to the
current position interrupt
- QEI_INTFLAG_REV_Int: Index compare value is equal to the current
index count interrupt
- QEI_INTFLAG_POS0REV_Int: Combined position 0 and revolution
count interrupt
- QEI_INTFLAG_POS1REV_Int: Combined position 1 and revolution
count interrupt
- QEI_INTFLAG_POS2REV_Int: Combined position 2 and revolution
count interrupt
return: New State of specified interrupt flag status (SET or RESET)
'''
return robocaller("QEI_GetIntStatus", "FlagStatus", QEIx, ulIntType)
def QEI_CalculateRPM(QEIx, ulVelCapValue, ulPPR):
'''Calculates the actual velocity in RPM passed via velocity capture value and
Pulse Per Round (of the encoder) value parameter input.
QEIx: QEI peripheral, should be LPC_QEI
ulVelCapValue: Velocity capture input value from QEI_GetVelocityCap()
function
ulPPR: Pulse per round of encoder
return: The actual value of velocity in RPM (revolutions per minute)
'''
return robocaller("QEI_CalculateRPM", "uint32_t", QEIx, ulVelCapValue, ulPPR)
def QEI_SetIndexComp(QEIx, ulIndexComp):
'''Set value for index compare in QEI peripheral.
QEIx: QEI peripheral, should be LPC_QEI
ulIndexComp: Compare Index Value to set
'''
return robocaller("QEI_SetIndexComp", "void", QEIx, ulIndexComp)
| bsd-2-clause | -4,555,719,060,852,139,500 | 37.333333 | 81 | 0.672781 | false |
pknight007/electrum-vtc | lib/tests/test_util.py | 2 | 3564 | import unittest
from lib.util import format_satoshis, parse_URI
class TestUtil(unittest.TestCase):
def test_format_satoshis(self):
result = format_satoshis(1234)
expected = "0.00001234"
self.assertEqual(expected, result)
def test_format_satoshis_diff_positive(self):
result = format_satoshis(1234, is_diff=True)
expected = "+0.00001234"
self.assertEqual(expected, result)
def test_format_satoshis_diff_negative(self):
result = format_satoshis(-1234, is_diff=True)
expected = "-0.00001234"
self.assertEqual(expected, result)
def _do_test_parse_URI(self, uri, expected):
result = parse_URI(uri)
self.assertEqual(expected, result)
def test_parse_URI_address(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv'})
def test_parse_URI_only_address(self):
self._do_test_parse_URI('LectrumELqJWMECz7W2iarBpT4VvAPqwAv',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv'})
def test_parse_URI_address_label(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?label=electrum%20test',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv', 'label': 'electrum test'})
def test_parse_URI_address_message(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?message=electrum%20test',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv', 'message': 'electrum test', 'memo': 'electrum test'})
def test_parse_URI_address_amount(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?amount=0.0003',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv', 'amount': 30000})
def test_parse_URI_address_request_url(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?r=http://domain.tld/page?h%3D2a8628fc2fbe',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv', 'r': 'http://domain.tld/page?h=2a8628fc2fbe'})
def test_parse_URI_ignore_args(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?test=test',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv', 'test': 'test'})
def test_parse_URI_multiple_args(self):
self._do_test_parse_URI('litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?amount=0.00004&label=electrum-test&message=electrum%20test&test=none&r=http://domain.tld/page',
{'address': 'LectrumELqJWMECz7W2iarBpT4VvAPqwAv', 'amount': 4000, 'label': 'electrum-test', 'message': u'electrum test', 'memo': u'electrum test', 'r': 'http://domain.tld/page', 'test': 'none'})
def test_parse_URI_no_address_request_url(self):
self._do_test_parse_URI('litecoin:?r=http://domain.tld/page?h%3D2a8628fc2fbe',
{'r': 'http://domain.tld/page?h=2a8628fc2fbe'})
def test_parse_URI_invalid_address(self):
self.assertRaises(BaseException, parse_URI, 'litecoin:invalidaddress')
def test_parse_URI_invalid(self):
self.assertRaises(BaseException, parse_URI, 'notlitecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv')
    def test_parse_URI_parameter_pollution(self):
self.assertRaises(Exception, parse_URI, 'litecoin:LectrumELqJWMECz7W2iarBpT4VvAPqwAv?amount=0.0003&label=test&amount=30.0')
| mit | -2,715,683,667,487,076,000 | 49.914286 | 226 | 0.66358 | false |
ESOedX/edx-platform | openedx/core/djangoapps/cors_csrf/middleware.py | 2 | 6122 | """
Middleware for handling CSRF checks with CORS requests
CSRF and referrer domain checks
-------------------------------
When processing HTTPS requests, the default CSRF middleware checks that the referer
domain and protocol is the same as the request's domain and protocol. This is meant
to avoid a type of attack for sites which serve their content with both HTTP and HTTPS,
with a man in the middle on the HTTP requests.
https://github.com/django/django/blob/b91c385e324f1cb94d20e2ad146372c259d51d3b/django/middleware/csrf.py#L117
This doesn't work well with CORS requests, which aren't vulnerable to this attack when
the server from which the request is coming uses HTTPS too, as it prevents the man in the
middle attack vector.
We thus do the CSRF check of requests coming from an authorized CORS host separately
in this middleware, applying the same protections as the default CSRF middleware, but
without the referrer check, when both the request and the referer use HTTPS.
CSRF cookie domains
-------------------
In addition, in order to make cross-domain AJAX calls to CSRF-protected end-points,
we need to send the CSRF token in the HTTP header of the request.
The simple way to do this would be to set the CSRF_COOKIE_DOMAIN to ".edx.org",
but unfortunately this can cause problems. For example, suppose that
"first.edx.org" sets the cookie with domain ".edx.org", but "second.edx.org"
sets a cookie with domain "second.edx.org". In this case, the browser
would have two different CSRF tokens set (one for each cookie domain),
which can cause non-deterministic failures depending on which cookie
is sent first.
For this reason, we add a second cookie that (a) has the domain set to ".edx.org",
but (b) does NOT have the same name as the CSRF_COOKIE_NAME. Clients making
cross-domain requests can use this cookie instead of the subdomain-specific
CSRF cookie.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.middleware.csrf import CsrfViewMiddleware
from .helpers import is_cross_domain_request_allowed, skip_cross_domain_referer_check
log = logging.getLogger(__name__)
class CorsCSRFMiddleware(CsrfViewMiddleware):
"""
Middleware for handling CSRF checks with CORS requests
"""
def __init__(self):
"""Disable the middleware if the feature flag is disabled. """
if not settings.FEATURES.get('ENABLE_CORS_HEADERS'):
raise MiddlewareNotUsed()
def process_view(self, request, callback, callback_args, callback_kwargs):
"""Skip the usual CSRF referer check if this is an allowed cross-domain request. """
if not is_cross_domain_request_allowed(request):
log.debug("Could not disable CSRF middleware referer check for cross-domain request.")
return
with skip_cross_domain_referer_check(request):
return super(CorsCSRFMiddleware, self).process_view(request, callback, callback_args, callback_kwargs)
class CsrfCrossDomainCookieMiddleware(object):
"""Set an additional "cross-domain" CSRF cookie.
Usage:
1) Decorate a view with `@ensure_csrf_cookie_cross_domain`.
2) Set `CROSS_DOMAIN_CSRF_COOKIE_NAME` and `CROSS_DOMAIN_CSRF_COOKIE_DOMAIN`
in settings.
3) Add the domain to `CORS_ORIGIN_WHITELIST`
4) Enable `FEATURES['ENABLE_CROSS_DOMAIN_CSRF_COOKIE']`
For testing, it is often easier to relax the security checks by setting:
* `CORS_ALLOW_INSECURE = True`
* `CORS_ORIGIN_ALLOW_ALL = True`
"""
def __init__(self):
"""Disable the middleware if the feature is not enabled. """
if not settings.FEATURES.get('ENABLE_CROSS_DOMAIN_CSRF_COOKIE'):
raise MiddlewareNotUsed()
if not getattr(settings, 'CROSS_DOMAIN_CSRF_COOKIE_NAME', ''):
raise ImproperlyConfigured(
"You must set `CROSS_DOMAIN_CSRF_COOKIE_NAME` when "
"`FEATURES['ENABLE_CROSS_DOMAIN_CSRF_COOKIE']` is True."
)
if not getattr(settings, 'CROSS_DOMAIN_CSRF_COOKIE_DOMAIN', ''):
raise ImproperlyConfigured(
"You must set `CROSS_DOMAIN_CSRF_COOKIE_DOMAIN` when "
"`FEATURES['ENABLE_CROSS_DOMAIN_CSRF_COOKIE']` is True."
)
def process_response(self, request, response):
"""Set the cross-domain CSRF cookie. """
# Check whether this is a secure request from a domain on our whitelist.
if not is_cross_domain_request_allowed(request):
log.debug("Could not set cross-domain CSRF cookie.")
return response
# Check whether (a) the CSRF middleware has already set a cookie, and
# (b) this is a view decorated with `@ensure_cross_domain_csrf_cookie`
# If so, we can send the cross-domain CSRF cookie.
should_set_cookie = (
request.META.get('CROSS_DOMAIN_CSRF_COOKIE_USED', False) and
request.META.get('CSRF_COOKIE_USED', False) and
request.META.get('CSRF_COOKIE') is not None
)
if should_set_cookie:
# This is very similar to the code in Django's CSRF middleware
# implementation, with two exceptions:
# 1) We change the cookie name and domain so it can be used cross-domain.
# 2) We always set "secure" to True, so that the CSRF token must be
# sent over a secure connection.
response.set_cookie(
settings.CROSS_DOMAIN_CSRF_COOKIE_NAME,
request.META['CSRF_COOKIE'],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CROSS_DOMAIN_CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=True
)
log.debug(
u"Set cross-domain CSRF cookie '%s' for domain '%s'",
settings.CROSS_DOMAIN_CSRF_COOKIE_NAME,
settings.CROSS_DOMAIN_CSRF_COOKIE_DOMAIN
)
return response
| agpl-3.0 | -606,991,972,538,857,500 | 40.087248 | 114 | 0.672819 | false |
alheinecke/tensorflow-xsmm | tensorflow/python/ops/embedding_ops.py | 18 | 16718 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _do_gather(params, ids, validate_indices=True, name=None):
"""Deals with doing gather differently for resource variables."""
if isinstance(params, resource_variable_ops.ResourceVariable):
return params.sparse_read(ids, name=name)
return array_ops.gather(
params, ids, name=name, validate_indices=validate_indices)
def embedding_lookup(params, ids, partition_strategy="mod", name=None,
validate_indices=True, max_norm=None):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
@{tf.gather}, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
If `partition_strategy` is `"mod"`, we assign each id to partition
`p = id % len(params)`. For instance,
13 ids are split across 5 partitions as:
`[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
If `partition_strategy` is `"div"`, we assign ids to partitions in a
contiguous manner. In this case, 13 ids are split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for the operation (optional).
validate_indices: Whether or not to validate gather indices.
max_norm: If not None, embedding values are l2-normalized to the value of
max_norm.
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
if params is None or params == []: # pylint: disable=g-explicit-bool-comparison
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
def maybe_normalize(x):
if max_norm is not None:
if x.get_shape().ndims is not None:
ndims = x.get_shape().ndims
else:
ndims = array_ops.size(array_ops.shape(x))
return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
return x
with ops.name_scope(name, "embedding_lookup", params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(isinstance(p, resource_variable_ops.ResourceVariable)
for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
if np == 1:
with ops.colocate_with(params[0]):
return maybe_normalize(
_do_gather(
params[0], ids, validate_indices=validate_indices, name=name))
else:
ids = ops.convert_to_tensor(ids, name="ids")
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape()[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape()[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
if params[p].get_shape()[0].value is not None:
dim_0_sizes.append(params[p].get_shape()[0].value)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(
flat_ids // (ids_per_partition + 1),
(flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
is_in_first_extras_partitions = math_ops.cast(
p_assignments < extras, flat_ids.dtype)
new_ids = (
is_in_first_extras_partitions * (
flat_ids % (ids_per_partition + 1)) +
(1 - is_in_first_extras_partitions) * (
(flat_ids - extras) % ids_per_partition))
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result.append(
_do_gather(params[p], gather_ids[p],
validate_indices=validate_indices))
# Stitch these back together
ret = data_flow_ops.dynamic_stitch(pindices, partitioned_result,
name=name)
# Reshape to reverse the flattening of ids.
element_shape = params[0].get_shape()[1:]
for p in params[1:]:
element_shape = element_shape.merge_with(p.get_shape()[1:])
if element_shape.is_fully_defined():
ret = array_ops.reshape(ret,
array_ops.concat(
[array_ops.shape(ids), element_shape], 0))
else:
# It's important that we compute params[0].shape on the right device
# to avoid data motion.
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
ret = array_ops.reshape(ret,
array_ops.concat([
array_ops.shape(ids),
array_ops.slice(params_shape, [1], [-1])
], 0))
# output shape = ids.shape + params[*].shape[1:]
# Normally the reshape is sufficient, but setting shape explicitly
# teaches shape inference that params[1:].get_shape() matters.
ret.set_shape(ids.get_shape().concatenate(element_shape))
return maybe_normalize(ret)
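# Illustrative usage sketch (added example, not part of the TensorFlow source).
# Assumes TF 1.x graph mode with `import tensorflow as tf`; shapes are arbitrary.
#
#   shards = [tf.get_variable("emb_%d" % i, shape=[50, 16]) for i in range(3)]
#   ids = tf.constant([[0, 7], [149, 42]], dtype=tf.int64)
#   # With partition_strategy="mod", id 7 maps to shard 7 % 3 == 1, row 7 // 3 == 2.
#   embedded = embedding_lookup(shards, ids, partition_strategy="mod")
#   # embedded has shape [2, 2, 16], i.e. shape(ids) + shape(params)[1:].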
def embedding_lookup_sparse(params, sp_ids, sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
max_norm: If not None, each embedding is normalized to have l2 norm equal
to max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
shape(combined params) = [p0, p1, ..., pm]
and
shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]
then
shape(output) = [d0, d1, ..., dn-1, p1, ..., pm].
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
with `combiner`="mean", then the output will be a 3x20 matrix where
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = params[0, :] * 1.0
output[2, :] = params[1, :] * 3.0
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy, max_norm=max_norm)
if not ignore_weights:
weights = sp_weights.values
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set the weight shape, since after reshaping to bcast_weights_shape,
# the shape becomes None.
if embeddings.get_shape().ndims is not None:
weights.set_shape(orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
embeddings *= weights
if combiner == "sum":
embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
else:
assert False, "Unrecognized combiner"
else:
assert idx is not None
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
name=name)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
name=name)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx,
segment_ids, name=name)
else:
assert False, "Unrecognized combiner"
return embeddings
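# Illustrative usage sketch (added example, not part of the TensorFlow source),
# mirroring the 3x20 example in the docstring above; assumes `import tensorflow as tf`.
#
#   params = tf.get_variable("embedding", shape=[10, 20])
#   sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [2, 3]],
#                            values=tf.constant([1, 3, 0, 1], dtype=tf.int64),
#                            dense_shape=[3, 4])
#   sp_weights = tf.SparseTensor(indices=sp_ids.indices,
#                                values=[2.0, 0.5, 1.0, 3.0],
#                                dense_shape=[3, 4])
#   combined = embedding_lookup_sparse(params, sp_ids, sp_weights, combiner="mean")
#   # combined has shape [3, 20], weighted as described in the docstring above.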
| apache-2.0 | 4,726,363,033,194,994,000 | 43.820375 | 82 | 0.640089 | false |
RobinQuetin/CAIRIS-web | cairis/cairis/controllers/AttackerController.py | 1 | 23270 | import httplib
from flask import request, session, make_response
from flask.ext.restful import Resource
from flask_restful_swagger import swagger
from CairisHTTPError import ARMHTTPError
from data.AttackerDAO import AttackerDAO
from tools.JsonConverter import json_serialize
from tools.MessageDefinitions import AttackerMessage, ValueTypeMessage
from tools.ModelDefinitions import AttackerModel, ValueTypeModel
from tools.SessionValidator import get_session_id
__author__ = 'Robin Quetin'
class AttackersAPI(Resource):
#region Swagger Doc
@swagger.operation(
notes='Get all attackers',
nickname='attackers-get',
responseClass=AttackerModel.__name__,
responseContainer='List',
parameters=[
{
"name": "constraint_id",
"description": "The constraint to use when querying the database",
"default": -1,
"required": False,
"allowMultiple": False,
"dataType": int.__name__,
"paramType": "query"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
#endregion
def get(self):
session_id = get_session_id(session, request)
constraint_id = request.args.get('constraint_id', -1)
dao = AttackerDAO(session_id)
attackers = dao.get_attackers(constraint_id=constraint_id)
dao.close()
resp = make_response(json_serialize(attackers, session_id=session_id), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Creates a new attacker',
nickname='attackers-post',
parameters=[
{
"name": "body",
"description": "The serialized version of the new attacker to be added",
"required": True,
"allowMultiple": False,
"type": AttackerMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
},
{
'code': ARMHTTPError.status_code,
'message': ARMHTTPError.status
}
]
)
# endregion
def post(self):
session_id = get_session_id(session, request)
dao = AttackerDAO(session_id)
new_attacker = dao.from_json(request)
attacker_id = dao.add_attacker(new_attacker)
dao.close()
resp_dict = {'message': 'Attacker successfully added', 'attacker_id': attacker_id}
resp = make_response(json_serialize(resp_dict), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
return resp
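# Illustrative client-side sketch (added example, not part of CAIRIS): exercising
# the two endpoints above with `requests`. The host, port and session_id are
# placeholders, and the POST body must follow the AttackerMessage envelope
# expected by AttackerDAO.from_json (shown only schematically here).
#
#   import requests
#   base = 'http://localhost:8000/api/attackers'
#   attackers = requests.get(base, params={'session_id': 'test'}).json()
#   requests.post(base, params={'session_id': 'test'},
#                 json={'session_id': 'test', 'object': {...}})  # new AttackerModel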
class AttackerByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get an attacker by name',
nickname='attacker-by-name-get',
responseClass=AttackerModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
dao = AttackerDAO(session_id)
attacker = dao.get_attacker_by_name(name=name)
dao.close()
resp = make_response(json_serialize(attacker, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Docs
@swagger.operation(
notes='Updates an attacker',
nickname='attacker-by-name-put',
parameters=[
{
'name': 'body',
"description": "JSON serialized version of the attacker to be updated",
"required": True,
"allowMultiple": False,
'type': AttackerMessage.__name__,
'paramType': 'body'
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'The provided file is not a valid XML file'
},
{
'code': httplib.BAD_REQUEST,
'message': '''Some parameters are missing. Be sure 'attacker' is defined.'''
}
]
)
# endregion
def put(self, name):
session_id = get_session_id(session, request)
dao = AttackerDAO(session_id)
req = dao.from_json(request)
dao.update_attacker(req, name=name)
dao.close()
resp_dict = {'message': 'Attacker successfully updated'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing attacker',
nickname='attacker-by-name-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided attacker name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, name):
session_id = get_session_id(session, request)
dao = AttackerDAO(session_id)
dao.delete_attacker(name=name)
dao.close()
resp_dict = {'message': 'Attacker successfully deleted'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
class AttackerCapabilitiesAPI(Resource):
#region Swagger Doc
@swagger.operation(
notes='Get all attacker capabilities',
nickname='attacker-capabilities-get',
responseClass=ValueTypeModel.__name__,
responseContainer='List',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
#endregion
def get(self):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
assets = dao.get_attacker_capabilities(environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(assets, session_id=session_id), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Creates a new attacker capability',
nickname='attacker-capability-by-name-post',
parameters=[
{
"name": "body",
"description": "The serialized version of the new attacker capability to be added",
"required": True,
"allowMultiple": False,
"type": ValueTypeMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def post(self):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
new_value_type = dao.type_from_json(request)
attacker_capability_id = dao.add_attacker_capability(new_value_type, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Attacker capability successfully added', 'attacker_capability_id': attacker_capability_id}
resp = make_response(json_serialize(resp_dict), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
return resp
class AttackerCapabilityByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get an attacker capability by name',
nickname='attacker-capability-by-name-get',
responseClass=ValueTypeModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
attacker_capability = dao.get_attacker_capability_by_name(name=name, environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(attacker_capability, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Docs
@swagger.operation(
notes='Updates an attacker capability',
nickname='attacker-capability-by-name-put',
parameters=[
{
'name': 'body',
"description": "",
"required": True,
"allowMultiple": False,
'type': ValueTypeMessage.__name__,
'paramType': 'body'
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'The provided file is not a valid XML file'
},
{
'code': httplib.BAD_REQUEST,
                'message': '''Some parameters are missing. Be sure the attacker capability is defined.'''
}
]
)
# endregion
def put(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
attacker_capability = dao.type_from_json(request)
dao.update_attacker_capability(attacker_capability, name=name, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Attacker capability successfully updated'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing attacker capability',
nickname='attacker-capability-by-name-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.NOT_FOUND,
                'message': 'The provided attacker capability name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
dao.delete_attacker_capability(name=name, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Attacker capability successfully deleted'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
class AttackerMotivationsAPI(Resource):
#region Swagger Doc
@swagger.operation(
notes='Get all attacker motivations',
nickname='attackers-motivations-get',
responseClass=ValueTypeModel.__name__,
responseContainer='List',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
#endregion
def get(self):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
assets = dao.get_attacker_motivations(environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(assets, session_id=session_id), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Creates a new attacker motivation',
nickname='attacker-motivation-by-name-post',
parameters=[
{
"name": "body",
"description": "The serialized version of the new attacker motivation to be added",
"required": True,
"allowMultiple": False,
"type": ValueTypeMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def post(self):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
new_value_type = dao.type_from_json(request)
attacker_motivation_id = dao.add_attacker_motivation(new_value_type, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Attacker motivation successfully added', 'attacker_motivation_id': attacker_motivation_id}
resp = make_response(json_serialize(resp_dict), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
return resp
class AttackerMotivationByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get an attacker motivation by name',
nickname='attacker-motivation-by-name-get',
responseClass=ValueTypeModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
attacker_motivation = dao.get_attacker_motivation_by_name(name=name, environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(attacker_motivation, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Docs
@swagger.operation(
notes='Updates an attacker motivation',
nickname='attacker-motivation-by-name-put',
parameters=[
{
'name': 'body',
"description": "",
"required": True,
"allowMultiple": False,
'type': ValueTypeMessage.__name__,
'paramType': 'body'
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'The provided file is not a valid XML file'
},
{
'code': httplib.BAD_REQUEST,
                'message': '''Some parameters are missing. Be sure the attacker motivation is defined.'''
}
]
)
# endregion
def put(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
attacker_motivation = dao.type_from_json(request)
dao.update_attacker_motivation(attacker_motivation, name=name, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Attacker motivation successfully updated'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing attacker motivation',
nickname='attacker-motivation-by-name-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.NOT_FOUND,
                'message': 'The provided attacker motivation name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AttackerDAO(session_id)
dao.delete_attacker_motivation(name=name, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Attacker motivation successfully deleted'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp | apache-2.0 | -4,933,528,991,966,499,000 | 33.121701 | 123 | 0.521874 | false |
oceanobservatories/mi-instrument | mi/dataset/driver/vel3d_cd/dcl/vel3d_cd_dcl_telemetered_driver.py | 7 | 1529 | #!/usr/bin/env python
"""
@package mi.dataset.driver.vel3d_cd.dcl
@file mi/dataset/driver/vel3d_cd/dcl/vel3d_cd_dcl_telemetered_driver.py
@author Emily Hahn
@brief Driver for the telemetered vel3d instrument series c and d through dcl
"""
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.vel3d_cd_dcl import Vel3dCdDclParser
from mi.core.versioning import version
@version("15.7.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rb') as stream_handle:
# create an instance of the concrete driver class defined below
driver = Vel3dCdDclTelemeteredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
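# Illustrative invocation sketch (added example): driving this entry point outside
# of uFrame. It assumes ParticleDataHandler is the collector class provided by
# mi.dataset.dataset_driver and that the file path points at a vel3d_cd DCL log.
#
#   from mi.dataset.dataset_driver import ParticleDataHandler
#   handler = parse(None, '/path/to/vel3d_cd_dcl.log', ParticleDataHandler())
#   # handler now holds the parsed particles collected via the parser callbacks.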
class Vel3dCdDclTelemeteredDriver(SimpleDatasetDriver):
"""
Create a _build_parser method for building the vel3d cd dcl parser
"""
def _build_parser(self, stream_handle):
"""
Build the vel3d cd dcl parser
:param stream_handle: The file handle to pass into the parser
:return: The created parser class
"""
# no config input
return Vel3dCdDclParser(stream_handle, self._exception_callback, is_telemetered=True)
| bsd-2-clause | -2,769,532,087,368,393,000 | 32.977778 | 93 | 0.71877 | false |
hanghang177/robo-i-star | robo-i-star/gui.py | 1 | 1370 | import tkinter as tk
from PIL import Image,ImageTk
import sys
import math
WIDTH = 1280
HEIGHT = 720
# Grainger, MEL, MSEB, Engineering Hall, Everitt Laboratory, Talbot Laboratory
coordnames = ["Grainger", "Talbot", "Everitt", "Engineering Hall", "Material Science Building", "Mechanical Engineering Lab"]
pixcoords = [[242, 115, 458, 179], [18, 220, 160, 341], [16, 463, 162, 567], [259, 475, 400, 567], [431, 456, 554, 567],[411, 215, 555, 381]]
class UI(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
mapimage = ImageTk.PhotoImage(Image.open("tests/map.jpg"))
self.title("ROBO-I-STAR")
self.location = tk.StringVar()
self.locationindex = tk.IntVar()
# self.geometry("1280x720")
panel = tk.Label(self, image = mapimage)
panel.image = mapimage
#panel.pack()
panel.bind("<Button-1>",self.click)
panel.pack(side="bottom", fill="both", expand="yes")
def click(self, event):
x = event.x
y = event.y
for a in range(len(pixcoords)):
pixcoord = pixcoords[a]
xmin, ymin, xmax, ymax = pixcoord
if (x >= xmin) and (x <= xmax) and (y >= ymin) and (y <= ymax):
self.location.set(coordnames[a])
self.locationindex.set(a)
print(coordnames[a])
self.destroy()
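
if __name__ == "__main__":
    # Minimal manual test (added sketch): open the map and let UI.click() print
    # the selected building before the window closes itself.
    app = UI()
    app.mainloop()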
| gpl-3.0 | -7,180,925,067,652,118,000 | 35.052632 | 141 | 0.581022 | false |
nis-sdn/odenos | src/test/python/org/o3project/odenos/core/component/network/packet/test_ofp_in_packet.py | 6 | 5404 | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.packet.ofp_in_packet\
import OFPInPacket
import unittest
class InPacketTest(unittest.TestCase):
value = None
result = None
def setUp(self):
self.target = OFPInPacket("ofp_in_packet_id",
"OFPInPacket",
"ofp_in_packet_attributes",
"ofp_in_packet_node",
"ofp_in_packet_port",
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"},
"ofp_in_packet_data")
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._body[self.target.PACKET_ID],
"ofp_in_packet_id")
self.assertEqual(self.target._body[self.target.TYPE],
"OFPInPacket")
self.assertEqual(self.target._body[self.target.ATTRIBUTES],
"ofp_in_packet_attributes")
self.assertEqual(self.target._body[self.target.NODE],
"ofp_in_packet_node")
self.assertEqual(self.target._body[self.target.PORT],
"ofp_in_packet_port")
self.assertEqual(self.target._body[self.target.HEADER],
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"})
self.assertEqual(self.target._body[self.target.DATA],
"ofp_in_packet_data")
def test_create_from_packed(self):
self.value = {"packet_id": "0123",
"type": "OFPInPacket",
"attributes": "0789",
"node": "9870",
"port": "6540",
"header": {"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"},
"data": "0147"}
self.result = OFPInPacket.create_from_packed(self.value)
self.assertEqual(self.result._body[self.target.PACKET_ID],
"0123")
self.assertEqual(self.result._body[self.target.TYPE],
"OFPInPacket")
self.assertEqual(self.result._body[self.target.ATTRIBUTES],
"0789")
self.assertEqual(self.result._body[self.target.NODE],
"9870")
self.assertEqual(self.result._body[self.target.PORT],
"6540")
self.assertEqual(self.result._body[self.target.HEADER]._body,
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"})
self.assertEqual(self.result._body[self.target.DATA],
"0147")
def test_packed_object(self):
self.value = {"packet_id": "0123",
"type": "OFPInPacket",
"attributes": "0789",
"node": "9870",
"port": "6540",
"header": {"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"},
"data": "0147"}
self.create = OFPInPacket.create_from_packed(self.value)
self.result = self.create.packed_object()
self.assertEqual(self.result[self.target.PACKET_ID],
"0123")
self.assertEqual(self.result[self.target.TYPE],
"OFPInPacket")
self.assertEqual(self.result[self.target.ATTRIBUTES],
"0789")
self.assertEqual(self.result[self.target.NODE],
"9870")
self.assertEqual(self.result[self.target.PORT],
"6540")
self.assertEqual(self.result[self.target.HEADER],
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"})
self.assertEqual(self.result[self.target.DATA],
"0147")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,646,718,790,878,809,000 | 43.295082 | 76 | 0.453368 | false |
chrisrsantiago/chiplibrary | chiplibrary/__init__.py | 1 | 2756 | # -*- coding: utf-8 -*-
import os
from pyramid.config import Configurator
from pyramid.events import BeforeRender, NewRequest
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from pyramid.scripts.pviews import PViewsCommand
from social.apps.pyramid_app.models import init_social
from .lib import helpers, reference
from .db import User
from .db.meta import Base
def add_renderer_globals(event):
"""Template globals.
"""
event['h'] = helpers
event['r'] = reference
def breadcrumb_subscriber(event):
"""Build breadcrumbs on pageload dynamically, to generate in
templates via the `request.bread` list..
"""
pvcomm = PViewsCommand([])
parts = event.request.path_info.split('/')
views = []
for i in range(1, len(parts)):
path = '/'.join(parts[:i])
view = pvcomm._find_view(event.request)
if view:
if path == '':
# Root page
views.append({'url': '/', 'title': 'chiplibrary'})
else:
title = path.split('/')[-1]
if title in set(['bn1', 'bn2', 'bn3', 'bn4', 'bn5', 'bn6']):
title = title.replace('bn', 'Battle Network ')
views.append({'url': path, 'title': title.title()})
# Current Page
views.append({'url': '', 'title': ''})
event.request.bread = views
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
settings = config.get_settings()
# In case certain directories aren't set in the config, we can default to
# making one in the same directory as the config file.
config.add_settings({
'config_path': os.path.dirname(global_config['__file__'])
})
session_factory = UnencryptedCookieSessionFactoryConfig(
settings['session.secret']
)
config.set_session_factory(session_factory)
config.include('pyramid_debugtoolbar')
config.include('pyramid_mako')
config.include('pyramid_dogpile_cache2')
config.include('.db')
config.include('.routes')
# Instantiate Whoosh Search Index
config.include('.lib.search')
# Setup python-social-auth
config.add_request_method(
'chiplibrary.lib.auth.get_user',
'user',
reify=True
)
config.include('chiplibrary.lib.auth')
init_social(config, Base, config.registry['dbsession_factory'])
# Add subscribers to instantiate processes or run necessary startup tasks
config.add_subscriber(add_renderer_globals, BeforeRender)
config.add_subscriber(breadcrumb_subscriber, NewRequest)
config.scan()
config.scan('social_pyramid')
return config.make_wsgi_app()
| mit | -3,795,348,171,892,321,000 | 33.024691 | 77 | 0.641147 | false |
ulope/guardhouse | guardhouse/epio_settings.py | 1 | 1125 | from settings import *
from bundle_config import config
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"HOST": config['postgres']['host'],
"PORT": int(config['postgres']['port']),
"USER": config['postgres']['username'],
"PASSWORD": config['postgres']['password'],
"NAME": config['postgres']['database'],
},
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '%s:%s' % (config['redis']['host'], config['redis']['port']),
'OPTIONS': {
'PASSWORD': config['redis']['password'],
},
},
}
# Celery
BROKER_BACKEND = "redis"
BROKER_HOST = config['redis']['host']
BROKER_PORT = int(config['redis']['port'])
BROKER_PASSWORD = config['redis']['password']
CELERY_RESULT_BACKEND = "redis"
REDIS_HOST = config['redis']['host']
REDIS_PORT = int(config['redis']['port'])
REDIS_PASSWORD = config['redis']['password']
COMPRESS_OFFLINE = True
LOGGING['handlers']['console']['level'] = "INFO"
GOOGLE_ANALYTICS_DOMAIN = ".guardhouse.ep.io"
GOOGLE_ANALYTICS_ID = "UA-4328176-5"
| bsd-3-clause | 4,237,921,371,518,185,500 | 27.125 | 81 | 0.599111 | false |
aperigault/ansible | lib/ansible/modules/network/nxos/nxos_snmp_user.py | 15 | 12815 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_user
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP users for monitoring.
description:
- Manages SNMP user configuration.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Authentication parameters not idempotent.
options:
user:
description:
- Name of the user.
required: true
group:
description:
- Group to which the user will belong to.
If state = present, and the user is existing,
the group is added to the user. If the user
is not existing, user entry is created with this
group argument.
If state = absent, only the group is removed from the
user entry. However, to maintain backward compatibility,
if the existing user belongs to only one group, and if
group argument is same as the existing user's group,
then the user entry also is deleted.
authentication:
description:
- Authentication parameters for the user.
choices: ['md5', 'sha']
pwd:
description:
- Authentication password when using md5 or sha.
This is not idempotent
privacy:
description:
- Privacy password for the user.
This is not idempotent
encrypt:
description:
- Enables AES-128 bit encryption when using privacy password.
type: bool
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_snmp_user:
user: ntc
group: network-operator
authentication: md5
pwd: test_password
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server user ntc network-operator auth md5 test_password"]
'''
import re
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, text=False):
command = {
'command': command,
'output': 'json',
}
if text:
command['output'] = 'text'
return run_commands(module, command)
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_groups(module):
data = execute_show_command('show snmp group', module)[0]
group_list = []
try:
group_table = data['TABLE_role']['ROW_role']
for group in group_table:
group_list.append(group['role_name'])
except (KeyError, AttributeError):
return group_list
return group_list
def get_snmp_user(user, module):
command = 'show snmp user {0}'.format(user)
body = execute_show_command(command, module, text=True)
body_text = body[0]
if 'No such entry' not in body[0]:
body = execute_show_command(command, module)
resource = {}
try:
# The TABLE and ROW keys differ between NXOS platforms.
if body[0].get('TABLE_snmp_user'):
tablekey = 'TABLE_snmp_user'
rowkey = 'ROW_snmp_user'
tablegrpkey = 'TABLE_snmp_group_names'
rowgrpkey = 'ROW_snmp_group_names'
authkey = 'auth_protocol'
privkey = 'priv_protocol'
grpkey = 'group_names'
elif body[0].get('TABLE_snmp_users'):
tablekey = 'TABLE_snmp_users'
rowkey = 'ROW_snmp_users'
tablegrpkey = 'TABLE_groups'
rowgrpkey = 'ROW_groups'
authkey = 'auth'
privkey = 'priv'
grpkey = 'group'
rt = body[0][tablekey][rowkey]
# on some older platforms, all groups except the 1st one
# are in list elements by themselves and they are
# indexed by 'user'. This is due to a platform bug.
# Get first element if rt is a list due to the bug
# or if there is no bug, parse rt directly
if isinstance(rt, list):
resource_table = rt[0]
else:
resource_table = rt
resource['user'] = user
resource['authentication'] = str(resource_table[authkey]).strip()
encrypt = str(resource_table[privkey]).strip()
if encrypt.startswith('aes'):
resource['encrypt'] = 'aes-128'
else:
resource['encrypt'] = 'none'
groups = []
if tablegrpkey in resource_table:
group_table = resource_table[tablegrpkey][rowgrpkey]
try:
for group in group_table:
groups.append(str(group[grpkey]).strip())
except TypeError:
groups.append(str(group_table[grpkey]).strip())
# Now for the platform bug case, get the groups
if isinstance(rt, list):
# remove 1st element from the list as this is parsed already
rt.pop(0)
# iterate through other elements indexed by
# 'user' and add it to groups.
for each in rt:
groups.append(each['user'].strip())
# Some 'F' platforms use 'group' key instead
elif 'group' in resource_table:
# single group is a string, multiple groups in a list
groups = resource_table['group']
if isinstance(groups, str):
groups = [groups]
resource['group'] = groups
except (KeyError, AttributeError, IndexError, TypeError):
if not resource and body_text and 'No such entry' not in body_text:
# 6K and other platforms may not return structured output;
# attempt to get state from text output
resource = get_non_structured_snmp_user(body_text)
return resource
def get_non_structured_snmp_user(body_text):
# This method is a workaround for platforms that don't support structured
# output for 'show snmp user <foo>'. This workaround may not work on all
# platforms. Sample non-struct output:
#
# User Auth Priv(enforce) Groups acl_filter
# ____ ____ _____________ ______ __________
# sample1 no no network-admin ipv4:my_acl
# network-operator
# priv-11
# -OR-
# sample2 md5 des(no) priv-15
# -OR-
# sample3 md5 aes-128(no) network-admin
resource = {}
output = body_text.rsplit('__________')[-1]
pat = re.compile(r'^(?P<user>\S+)\s+'
r'(?P<auth>\S+)\s+'
r'(?P<priv>[\w\d-]+)(?P<enforce>\([\w\d-]+\))*\s+'
r'(?P<group>\S+)',
re.M)
m = re.search(pat, output)
if not m:
return resource
resource['user'] = m.group('user')
resource['auth'] = m.group('auth')
resource['encrypt'] = 'aes-128' if 'aes' in str(m.group('priv')) else 'none'
resource['group'] = [m.group('group')]
more_groups = re.findall(r'^\s+([\w\d-]+)\s*$', output, re.M)
if more_groups:
resource['group'] += more_groups
return resource
def remove_snmp_user(user, group=None):
if group:
return ['no snmp-server user {0} {1}'.format(user, group)]
else:
return ['no snmp-server user {0}'.format(user)]
def config_snmp_user(proposed, user, reset):
if reset:
commands = remove_snmp_user(user)
else:
commands = []
if proposed.get('group'):
cmd = 'snmp-server user {0} {group}'.format(user, **proposed)
else:
cmd = 'snmp-server user {0}'.format(user)
auth = proposed.get('authentication', None)
pwd = proposed.get('pwd', None)
if auth and pwd:
cmd += ' auth {authentication} {pwd}'.format(**proposed)
encrypt = proposed.get('encrypt', None)
privacy = proposed.get('privacy', None)
if encrypt and privacy:
cmd += ' priv {encrypt} {privacy}'.format(**proposed)
elif privacy:
cmd += ' priv {privacy}'.format(**proposed)
if cmd:
commands.append(cmd)
return commands
def main():
argument_spec = dict(
user=dict(required=True, type='str'),
group=dict(type='str'),
pwd=dict(type='str'),
privacy=dict(type='str'),
authentication=dict(choices=['md5', 'sha']),
encrypt=dict(type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_together=[['authentication', 'pwd'],
['encrypt', 'privacy']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
user = module.params['user']
group = module.params['group']
pwd = module.params['pwd']
privacy = module.params['privacy']
encrypt = module.params['encrypt']
authentication = module.params['authentication']
state = module.params['state']
if privacy and encrypt:
if not pwd and authentication:
module.fail_json(msg='pwd and authentication must be provided '
'when using privacy and encrypt')
if group and group not in get_snmp_groups(module):
module.fail_json(msg='group not configured yet on switch.')
existing = get_snmp_user(user, module)
if state == 'present' and existing:
if group:
if group not in existing['group']:
existing['group'] = None
else:
existing['group'] = group
else:
existing['group'] = None
commands = []
if state == 'absent' and existing:
if group:
if group in existing['group']:
if len(existing['group']) == 1:
commands.append(remove_snmp_user(user))
else:
commands.append(remove_snmp_user(user, group))
else:
commands.append(remove_snmp_user(user))
elif state == 'present':
reset = False
args = dict(user=user, pwd=pwd, group=group, privacy=privacy,
encrypt=encrypt, authentication=authentication)
proposed = dict((k, v) for k, v in args.items() if v is not None)
if not existing:
if encrypt:
proposed['encrypt'] = 'aes-128'
commands.append(config_snmp_user(proposed, user, reset))
elif existing:
if encrypt and not existing['encrypt'].startswith('aes'):
reset = True
proposed['encrypt'] = 'aes-128'
delta = dict(set(proposed.items()).difference(existing.items()))
if delta.get('pwd'):
delta['authentication'] = authentication
if delta and encrypt:
delta['encrypt'] = 'aes-128'
if delta:
command = config_snmp_user(delta, user, reset)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,832,756,629,373,410,000 | 31.361111 | 81 | 0.568552 | false |
SosirisTseng/hearts-of-silicon | hos/ode/ion_rates.py | 1 | 2416 | """
Rate of change of ions
"""
import common_const as cc
from config import USE_NUMBA
def _get_beta_inv(ca, km, total):
"""Calculates buffering factor, beta"""
return 1.0 + km * total / (km + ca)**2
def get_d_ca_all(ca_in, ca_nsr, ca_jsr, j_up, j_rel, j_trpn,
j_xfer, i_cab, i_naca, i_pca, v_naca, v_uni):
"""
Computes rate of change of calcium in all compartments
"""
KM_CMDN, KM_CSQN = 2.38E-3, 0.8 # (mM) Ca half saturation constant for calmodulin
CMDN_TOTAL, CSQN_TOTAL = 5E-2, 5 # (mM) Total calmodulin/calsequestrin concentration
V_NSR, V_JSR = 1.4E-6, 0.16E-6 # (uL) NSR and JSR volume
RATE_TR = 0.5747 # (1/ms) Rate constant for diffusion NSR -> JSR
FREE_MITO_CA = 3E-4 # Fraction of free mitochondrial calcium
# Buffering factor
b_inv_in = _get_beta_inv(ca_in, KM_CMDN, CMDN_TOTAL)
b_inv_nsr = _get_beta_inv(ca_nsr, KM_CSQN, CSQN_TOTAL)
b_inv_jsr = _get_beta_inv(ca_jsr, KM_CSQN, CSQN_TOTAL)
jtr = RATE_TR * (ca_nsr - ca_jsr)
# Derivatives (mM/ms)
d_ca = ((j_xfer - j_up - j_trpn - (i_cab - 2 * i_naca + i_pca) * cc.ACAP_MYO_F
+ (v_naca - v_uni) * cc.V_MITO_V_MYO) / b_inv_in)
d_ca_nsr = (cc.V_MYO / V_NSR * j_up - jtr) / b_inv_nsr
d_ca_jsr = (V_NSR * jtr - cc.V_SS * j_rel) / (V_JSR * b_inv_jsr)
d_ca_mt = FREE_MITO_CA * (v_uni - v_naca)
return d_ca, d_ca_nsr, d_ca_jsr, d_ca_mt
def get_d_vm(i_na, i_nab, i_nsna, i_naca, i_nak, i_k, i_k1, i_kp, i_katp, i_cal, i_pca, i_cab, i_stim):
"""
Computes rate of change of membrane potential, dV/dt (mV/ms)
"""
# Membrane capacitance is 1; thus ommited
return -(i_na + i_nab + i_nsna + i_naca + i_nak + i_k + i_k1 + i_kp + i_katp + i_cal + i_pca + i_cab + i_stim)
def get_d_na_in(i_na, i_nab, i_nsna, i_naca, i_nak):
""" Rate of change of cytoplasmic sodium, dNa/dt (mM/ms)"""
return -cc.ACAP_MYO_F * (i_na + i_nab + i_nsna + 3 * (i_naca + i_nak))
def get_d_k_in(i_k, i_k1, i_kp, i_nak, i_katp, i_stim):
""" Rate of change of cytoplasmic potassium, dK/dt (mM/ms)"""
return -cc.ACAP_MYO_F * (i_k + i_k1 + i_kp + i_katp - 2 * i_nak + i_stim)
if USE_NUMBA:
from numba import vectorize
_get_beta_inv = vectorize(_get_beta_inv)
get_d_ca_all = vectorize(get_d_ca_all)
get_d_vm = vectorize(get_d_vm)
get_d_na_in = vectorize(get_d_na_in)
get_d_k_in = vectorize(get_d_k_in)
| mit | 2,801,133,271,543,708,700 | 37.349206 | 114 | 0.582781 | false |
OptimusGREEN/repo67beta | service.ogmcupdater/isAddonUpToDate.py | 1 | 2786 | import os, xbmcaddon, urllib, urllib2, xbmc
from distutils.version import LooseVersion
################################################################################
################################################################################
##### Check if addon is up-to-date #####
################################################################################
################################################################################
def addon_update_avail(addonID, repo_addonsfile_url):
current = check_addon_current_ver(addonID)
latest = check_addon_latest_ver(addonID, repo_addonsfile_url)
if not latest:
return False
elif compare_versions(current, latest):
return True
else:
return False
def check_addon_latest_ver(addonID, repo_addonsfile_url):
addonline = 'addon id="%s"' % (addonID)
saved = xbmc.translatePath("special://home/userdata/repoaddonsfile.txt")
if not url_exists(repo_addonsfile_url):
return False
urllib.urlretrieve(repo_addonsfile_url, saved)
if os.path.exists(saved):
try:
with open(saved) as f:
content = f.readlines()
for line in content:
line = line.strip('\n')
line = line.strip('\r')
if addonline in line:
prever = line.split('version="', 1)[1]
ver = prever.split('" provider', 1)[0]
f.close()
os.remove(saved)
return ver
except:
xbmc.log("################# OGMC Updater: check_addon_latest_ver: couldn't read file #####################")
def check_addon_current_ver(addonID):
Addon = xbmcaddon.Addon(addonID)
ver = Addon.getAddonInfo('version')
return ver
def compare_versions(current, latest):
if LooseVersion(current) < LooseVersion(latest):
return True
else:
return False
def url_exists(url):
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
try:
response = urllib2.urlopen(request)
return True
except:
return False
################################################################################
################################################################################
##### Call using This #####
################################################################################
################################################################################
# if addon_update_avail("putYour_AddonIDhere", "putYour_RepoAddonsXML_url_Here"):
# xbmc.executebuiltin("UpdateAddonRepos()")
# xbmc.executebuiltin("UpdateLocalAddons()") | gpl-3.0 | -2,931,127,970,079,315,000 | 36.16 | 124 | 0.43575 | false |
eskibars/domoticz | plugins/examples/BaseTemplate.py | 14 | 3247 | # Basic Python Plugin Example
#
# Author: GizMoCuz
#
"""
<plugin key="BasePlug" name="Basic Python Plugin Example" author="gizmocuz" version="1.0.0" wikilink="http://www.domoticz.com/wiki/plugins/plugin.html" externallink="https://www.google.com/">
<description>
<h2>Plugin Title</h2><br/>
Overview...
<h3>Features</h3>
<ul style="list-style-type:square">
<li>Feature one...</li>
<li>Feature two...</li>
</ul>
<h3>Devices</h3>
<ul style="list-style-type:square">
<li>Device Type - What it does...</li>
</ul>
<h3>Configuration</h3>
Configuration options...
</description>
<params>
</params>
</plugin>
"""
import Domoticz
class BasePlugin:
enabled = False
def __init__(self):
#self.var = 123
return
def onStart(self):
Domoticz.Log("onStart called")
def onStop(self):
Domoticz.Log("onStop called")
def onConnect(self, Connection, Status, Description):
Domoticz.Log("onConnect called")
def onMessage(self, Connection, Data):
Domoticz.Log("onMessage called")
def onCommand(self, Unit, Command, Level, Hue):
Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
Domoticz.Log("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)
def onDisconnect(self, Connection):
Domoticz.Log("onDisconnect called")
def onHeartbeat(self):
Domoticz.Log("onHeartbeat called")
global _plugin
_plugin = BasePlugin()
def onStart():
global _plugin
_plugin.onStart()
def onStop():
global _plugin
_plugin.onStop()
def onConnect(Connection, Status, Description):
global _plugin
_plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
global _plugin
_plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Hue):
global _plugin
_plugin.onCommand(Unit, Command, Level, Hue)
def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
global _plugin
_plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)
def onDisconnect(Connection):
global _plugin
_plugin.onDisconnect(Connection)
def onHeartbeat():
global _plugin
_plugin.onHeartbeat()
# Generic helper functions
def DumpConfigToLog():
for x in Parameters:
if Parameters[x] != "":
Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'")
Domoticz.Debug("Device count: " + str(len(Devices)))
for x in Devices:
Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x]))
Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'")
Domoticz.Debug("Device Name: '" + Devices[x].Name + "'")
Domoticz.Debug("Device nValue: " + str(Devices[x].nValue))
Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'")
Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel))
return | gpl-3.0 | 3,684,882,182,494,771,000 | 29.641509 | 191 | 0.612257 | false |
tscholl2/smc | src/scripts/push_vm_images_base.py | 3 | 1264 | #!/usr/bin/env python
###############################################################################
#
# CoCalc: Collaborative Calculation in the Cloud
#
# Copyright (C) 2016, Sagemath Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import hosts, os
BASE = '~/vm/images/base/'
for hostname in hosts.vm_hosts:
# the u below means that this won't overwrite newer files on destination, which could happen by accident if we were careless.
cmd = "rsync --sparse -uaxvH %s %s:vm/images/base/ " % (BASE, hostname)
print(cmd)
os.system(cmd)
| agpl-3.0 | 1,641,609,147,862,512,000 | 39.774194 | 129 | 0.620253 | false |
tmerrick1/spack | var/spack/repos/builtin/packages/soapsnp/package.py | 5 | 1823 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Soapsnp(MakefilePackage):
"""SOAPsnp uses a method based on Bayes' theorem (the reverse probability
model) to call consensus genotype by carefully considering the data
quality, alignment, and recurring experimental errors."""
homepage = "http://soap.genomics.org.cn/soapsnp.html"
url = "http://soap.genomics.org.cn/down/SOAPsnp-v1.03.tar.gz"
version('1.03', '8d69e196013657357ff840b611762ebc')
depends_on('boost')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('soapsnp', prefix.bin)
| lgpl-2.1 | -5,396,797,654,784,089,000 | 42.404762 | 78 | 0.677455 | false |
hujiajie/chromium-crosswalk | tools/perf/metrics/network.py | 22 | 2308 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.value import scalar
from metrics import Metric
NETWORK_DATA_NOT_FOUND = 'Network data could not be found.'
# This is experimental. crbug.com/480512
# Will not be supported once network data is ported to TimelineBasedMetric.
class NetworkMetric(Metric):
"""NetworkMetrics gathers network statistics."""
def __init__(self, platform):
super(NetworkMetric, self).__init__()
self._network_snd = None
self._network_rcv = None
self._platform = platform
self._browser = None
def Start(self, _, tab):
"""Start the per-page preparation for this metric.
Here, this consists of recording the start value.
"""
self._browser = tab.browser
if not self._platform.CanMonitorNetworkData():
return
data = self._platform.GetNetworkData(self._browser)
if data is not None:
self._network_snd, self._network_rcv = data
def Stop(self, _, tab):
"""Prepare the results for this page.
The results are the differences between the current values
and the values when Start() was called.
"""
if not self._platform.CanMonitorNetworkData():
return
data = self._platform.GetNetworkData(self._browser)
if data is not None:
snd, rcv = data
if self._network_snd is not None:
self._network_snd = snd - self._network_snd
if self._network_rcv is not None:
self._network_rcv = rcv - self._network_rcv
else: # If end data cannot be found, report none.
self._network_snd = None
self._network_rcv = None
def AddResults(self, tab, results):
none_value_reason = (
None if self._network_snd is not None else NETWORK_DATA_NOT_FOUND)
results.AddValue(scalar.ScalarValue(
results.current_page, 'network_data_sent', 'kb', self._network_snd,
important=False, none_value_reason=none_value_reason))
none_value_reason = (
None if self._network_rcv is not None else NETWORK_DATA_NOT_FOUND)
results.AddValue(scalar.ScalarValue(
results.current_page, 'network_data_received', 'kb', self._network_rcv,
important=False, none_value_reason=none_value_reason))
| bsd-3-clause | 7,543,104,522,033,818,000 | 32.941176 | 79 | 0.681542 | false |
BitcoinUnlimited/BitcoinUnlimited | qa/rpc-tests/test_framework/bumessages.py | 1 | 12392 | from .nodemessages import *
class QHash(object):
"""quarter hash"""
def __init__(self, shortHash=None):
self.hash = shortHash
def deserialize(self, f):
self.hash = struct.unpack("<Q", f.read(8))[0]
return self
def serialize(self):
r = b""
r += struct.pack("<Q", self.hash)
return r
def __repr__(self):
return "QHash(0x%016x)" % (self.hash)
class Hash(object):
"""sha256 hash"""
def __init__(self, hash=None):
self.hash = hash
def deserialize(self, f):
self.hash = deser_uint256(f)
return self
def serialize(self):
r = b""
r += ser_uint256(self.hash)
return r
def __str__(self):
return "%064x" % self.hash
def __repr__(self):
return "Hash(%064x)" % self.hash
class CXThinBlock(CBlockHeader):
def __init__(self, header=None, vTxHashes=None, vMissingTx=None):
super(CXThinBlock, self).__init__(header)
self.vTxHashes = vTxHashes
self.vMissingTx = vMissingTx
def deserialize(self, f):
super(CXThinBlock, self).deserialize(f)
self.vTxHashes = deser_vector(f, QHash)
self.vMissingTx = deser_vector(f, CTransaction)
return self
def serialize(self):
r = b""
r += super(CXThinBlock, self).serialize()
r += ser_vector(self.vTxHashes)
r += ser_vector(self.vMissingTx)
return r
def summary(self):
s = []
s.append(super(self.__class__, self).summary())
s.append("\nQuarter Hashes")
count = 0
for qh in self.vTxHashes:
if (count % 5) == 0:
s.append("\n%4d: " % count)
s.append("%016x " % qh.hash)
count += 1
s.append("\nFull Transactions\n")
count = 0
for tx in self.vMissingTx:
s.append("%4d: %s\n" % (count, tx.summary()))
count += 1
return "".join(s)
def __str__(self):
return "CXThinBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vTxHashes_len=%d vMissingTx_len=%d)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, len(self.vTxHashes), len(self.vMissingTx))
# For normal "mainnet" blocks, this function produces a painfully large single line output.
# It is so large, you may be forced to kill your python shell just to get it to stop.
# But it is easy to accidentally call repr from the python interactive shell or pdb. There is no current
# use and removing this function call makes interactive sessions easier to use.
# However, the function shall be left commented out for symmetry with the other objects and in case
# it is needed.
# def __repr__(self):
# return "CXThinBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vTxHashes=%s vMissingTx=%s)" \
# % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
# time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vTxHashes), repr(self.vMissingTx))
class CThinBlock(CBlockHeader):
def __init__(self, header=None):
super(self.__class__, self).__init__(header)
self.vTxHashes = []
self.vMissingTx = []
def deserialize(self, f):
super(self.__class__, self).deserialize(f)
self.vTxHashes = deser_vector(f, Hash)
self.vMissingTx = deser_vector(f, CTransaction)
return self
def serialize(self):
r = b""
r += super(self.__class__, self).serialize()
r += ser_vector(self.vTxHashes)
r += ser_vector(self.vMissingTx)
return r
def __str__(self):
return "CThinBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vTxHashes_len=%d vMissingTx_len=%d)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, len(self.vTxHashes), len(self.vMissingTx))
# For normal "mainnet" blocks, this function produces a painfully large single line output.
# It is so large, you may be forced to kill your python shell just to get it to stop.
# But it is easy to accidentally call repr from the python interactive shell or pdb. There is no current
# use and removing this function call makes interactive sessions easier to use.
# However, the function shall be left commented out for symmetry with the other objects and in case
# it is needed.
# def __repr__(self):
# return "CThinBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vTxHashes=%s vMissingTx=%s)" \
# % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
# time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vTxHashes), repr(self.vMissingTx))
class CBloomFilter:
def __init__(self, vData=b"", hashFuncs=0, tweak=0, flags = 0):
self.vData = vData
self.nHashFuncs = hashFuncs
self.nTweak = tweak
self.nFlags = flags
def deserialize(self, f):
self.vData = deser_string(f)
self.nHashFuncs = struct.unpack("<I", f.read(4))[0]
self.nTweak = struct.unpack("<I", f.read(4))[0]
self.nFlags = struct.unpack("<B", f.read(1))[0]
return self
def serialize(self):
r = b""
r += ser_string(self.vData)
r += struct.pack("<I", self.nHashFuncs)
r += struct.pack("<I", self.nTweak)
r += struct.pack("<B", self.nFlags)
return r
def __repr__(self):
return "%s(vData=%s)" % (self.__class__.__name__, self.vData)
class CMemPoolSize:
def __init__(self, vData=None):
self.vData = vData
self.nHashFuncs = None
self.nTweak = None
self.nFlags = None
def deserialize(self, f):
self.vData = deser_string(f)
self.nHashFuncs = struct.unpack("<I", f.read(4))[0]
self.nTweak = struct.unpack("<I", f.read(4))[0]
self.nFlags = struct.unpack("<B", f.read(1))[0]
return self
def serialize(self):
r = b""
r += ser_string(f, self.vData)
r += struct.pack("<I", self.nHashFuncs)
r += struct.pack("<I", self.nTweak)
r += struct.pack("<B", self.nFlags)
return r
def __repr__(self):
return "%s(vData=%s)" % (self.__class__.__name__, self.vData)
class msg_thinblock(object):
command = b"thinblock"
def __init__(self, block=None):
if block is None:
self.block = CThinBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
return self
def serialize(self):
return self.block.serialize()
def __str__(self):
return "msg_thinblock(block=%s)" % (str(self.block))
def __repr__(self):
return "msg_thinblock(block=%s)" % (repr(self.block))
class msg_xthinblock(object):
command = b"xthinblock"
def __init__(self, block=None):
if block is None:
self.block = CXThinBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
return self
def serialize(self):
return self.block.serialize()
def __str__(self):
return "msg_xthinblock(block=%s)" % (str(self.block))
def __repr__(self):
return "msg_xthinblock(block=%s)" % (repr(self.block))
class msg_Xb(object):
"""Expedited block message"""
command = b"Xb"
EXPEDITED_MSG_HDR = 1
EXPEDITED_MSG_XTHIN = 2
def __init__(self, block=None, hops=0, msgType=EXPEDITED_MSG_XTHIN):
self.msgType = msgType
self.hops = hops
self.block = block
def deserialize(self, f):
self.msgType = struct.unpack("<B", f.read(1))[0]
self.hops = struct.unpack("<B", f.read(1))[0]
if self.msgType == EXPEDITED_MSG_XTHIN:
self.block = CXThinBlock()
self.block.deserialize(f)
else:
self.block = None
return self
def serialize(self):
r = b""
r += struct.pack("<B", self.msgType)
r += struct.pack("<B", self.hops)
if self.msgType == EXPEDITED_MSG_XTHIN:
r += self.block.serialize()
return r
def __str__(self):
return "msg_Xb(block=%s)" % (str(self.block))
def __repr__(self):
return "msg_Xb(block=%s)" % (repr(self.block))
class msg_get_xthin(object):
command = b"get_xthin"
def __init__(self, inv=None, filter=None):
self.inv = inv
self.filter = filter if filter != None else CBloomFilter()
def deserialize(self, f):
self.inv = CInv()
self.inv.deserialize(f)
self.filter = CBloomFilter()
self.filter.deserialize(f)
return self
def serialize(self):
r = b""
r += self.inv.serialize()
r += self.filter.serialize()
return r
def __repr__(self):
return "%s(inv=%s,filter=%s)" % (self.__class__.__name__, repr(self.inv), repr(self.filter))
class msg_get_thin(object):
command = b"get_thin"
def __init__(self, inv=None):
self.inv = inv
def deserialize(self, f):
self.inv = CInv()
self.inv.deserialize(f)
return self
def serialize(self):
r = b""
r += self.inv.serialize()
return r
def __repr__(self):
return "%s(inv=%s)" % (self.__class__.__name__, repr(self.inv))
class msg_filterload(object):
command = b"filterload"
def __init__(self, inv=None, filter=None):
self.filter = filter
def deserialize(self, f):
self.filter = CBloomFilter()
self.filter.deserialize(f)
return self
def serialize(self):
r = b""
r += self.filter.serialize()
return r
def __repr__(self):
return "%s(filter=%s)" % (self.__class__.__name__, repr(self.filter))
class msg_filteradd(object):
command = b"filteradd"
def __init__(self, inv=None, filter=None):
self.filter = filter
def deserialize(self, f):
self.filter = deser_string(f)
return self
def serialize(self):
r = b""
r += ser_string(f, self.filter)
return r
def __repr__(self):
return "%s(filteradd=%s)" % (self.__class__.__name__, repr(self.filter))
class msg_filterclear(object):
command = b"filterclear"
def __init__(self):
pass
def deserialize(self, f):
return self
def serialize(self):
r = b""
return r
def __repr__(self):
return "msg_filterclear()"
class msg_get_xblocktx(object):
command = b"get_xblocktx"
def __init__(self, blockhash=None, qhashes=None):
self.blockhash = blockhash
self.setCheapHashesToRequest = qhashes
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.setCheapHashesToRequest = deser_vector(f, QHash)
return self
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_vector(self.setCheapHashesToRequest)
return r
def __repr__(self):
return "%s(blockhash=%s,qhash=%s)" % (self.__class__.__name__, repr(self.blockhash), repr(self.setCheapHashesToRequest))
class msg_req_xpedited(object):
"""request expedited blocks"""
command = b"req_xpedited"
EXPEDITED_STOP = 1
EXPEDITED_BLOCKS = 2
EXPEDITED_TXNS = 4
def __init__(self, options=None):
self.options = options
def deserialize(self, f):
self.options = struct.unpack("<Q", f.read(8))[0]
return self
def serialize(self):
r = b""
r += struct.pack("<Q", self.options)
return r
def __repr__(self):
return "%s(0x%x)" % (self.__class__.__name__, self.options)
bumessagemap = {
msg_xthinblock.command: msg_xthinblock,
msg_thinblock.command: msg_thinblock,
msg_get_xthin.command: msg_get_xthin,
msg_get_xblocktx.command: msg_get_xblocktx,
msg_filterload.command: msg_filterload,
msg_filteradd.command: msg_filteradd,
msg_filterclear.command: msg_filterclear,
msg_Xb.command: msg_Xb,
msg_req_xpedited.command: msg_req_xpedited,
}
| mit | 4,719,418,770,639,833,000 | 28.434679 | 162 | 0.585458 | false |
crsmithdev/arrow | arrow/arrow.py | 1 | 62146 | """
Provides the :class:`Arrow <arrow.arrow.Arrow>` class, an enhanced ``datetime``
replacement.
"""
import calendar
import re
import sys
from datetime import date
from datetime import datetime as dt_datetime
from datetime import time as dt_time
from datetime import timedelta
from datetime import tzinfo as dt_tzinfo
from math import trunc
from time import struct_time
from typing import (
Any,
ClassVar,
Generator,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
cast,
overload,
)
from dateutil import tz as dateutil_tz
from dateutil.relativedelta import relativedelta
from arrow import formatter, locales, parser, util
from arrow.constants import DEFAULT_LOCALE, DEHUMANIZE_LOCALES
from arrow.locales import TimeFrameLiteral
if sys.version_info < (3, 8): # pragma: no cover
from typing_extensions import Final, Literal
else:
from typing import Final, Literal # pragma: no cover
TZ_EXPR = Union[dt_tzinfo, str]
_T_FRAMES = Literal[
"year",
"years",
"month",
"months",
"day",
"days",
"hour",
"hours",
"minute",
"minutes",
"second",
"seconds",
"microsecond",
"microseconds",
"week",
"weeks",
"quarter",
"quarters",
]
_BOUNDS = Literal["[)", "()", "(]", "[]"]
_GRANULARITY = Literal[
"auto",
"second",
"minute",
"hour",
"day",
"week",
"month",
"year",
]
class Arrow:
"""An :class:`Arrow <arrow.arrow.Arrow>` object.
    Implements the ``datetime`` interface, behaving as an aware ``datetime`` while providing
additional functionality.
:param year: the calendar year.
:param month: the calendar month.
:param day: the calendar day.
:param hour: (optional) the hour. Defaults to 0.
    :param minute: (optional) the minute. Defaults to 0.
    :param second: (optional) the second. Defaults to 0.
:param microsecond: (optional) the microsecond. Defaults to 0.
:param tzinfo: (optional) A timezone expression. Defaults to UTC.
:param fold: (optional) 0 or 1, used to disambiguate repeated wall times. Defaults to 0.
.. _tz-expr:
Recognized timezone expressions:
- A ``tzinfo`` object.
- A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
- A ``str`` in ISO 8601 style, as in '+07:00'.
- A ``str``, one of the following: 'local', 'utc', 'UTC'.
Usage::
>>> import arrow
>>> arrow.Arrow(2013, 5, 5, 12, 30, 45)
<Arrow [2013-05-05T12:30:45+00:00]>
"""
resolution: ClassVar[timedelta] = dt_datetime.resolution
min: ClassVar["Arrow"]
max: ClassVar["Arrow"]
_ATTRS: Final[List[str]] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"microsecond",
]
_ATTRS_PLURAL: Final[List[str]] = [f"{a}s" for a in _ATTRS]
_MONTHS_PER_QUARTER: Final[int] = 3
_SECS_PER_MINUTE: Final[int] = 60
_SECS_PER_HOUR: Final[int] = 60 * 60
_SECS_PER_DAY: Final[int] = 60 * 60 * 24
_SECS_PER_WEEK: Final[int] = 60 * 60 * 24 * 7
_SECS_PER_MONTH: Final[float] = 60 * 60 * 24 * 30.5
_SECS_PER_YEAR: Final[int] = 60 * 60 * 24 * 365
_SECS_MAP: Final[Mapping[TimeFrameLiteral, float]] = {
"second": 1.0,
"minute": _SECS_PER_MINUTE,
"hour": _SECS_PER_HOUR,
"day": _SECS_PER_DAY,
"week": _SECS_PER_WEEK,
"month": _SECS_PER_MONTH,
"year": _SECS_PER_YEAR,
}
_datetime: dt_datetime
def __init__(
self,
year: int,
month: int,
day: int,
hour: int = 0,
minute: int = 0,
second: int = 0,
microsecond: int = 0,
tzinfo: Optional[TZ_EXPR] = None,
**kwargs: Any,
) -> None:
if tzinfo is None:
tzinfo = dateutil_tz.tzutc()
# detect that tzinfo is a pytz object (issue #626)
elif (
isinstance(tzinfo, dt_tzinfo)
and hasattr(tzinfo, "localize")
and hasattr(tzinfo, "zone")
and tzinfo.zone # type: ignore[attr-defined]
):
tzinfo = parser.TzinfoParser.parse(tzinfo.zone) # type: ignore[attr-defined]
elif isinstance(tzinfo, str):
tzinfo = parser.TzinfoParser.parse(tzinfo)
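        # PEP 495: ``fold`` selects between the two ambiguous wall times around a
        # backward DST transition; it defaults to 0 (the earlier occurrence).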
fold = kwargs.get("fold", 0)
self._datetime = dt_datetime(
year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold
)
# factories: single object, both original and from datetime.
@classmethod
def now(cls, tzinfo: Optional[dt_tzinfo] = None) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in the given
timezone.
:param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
Usage::
>>> arrow.now('Asia/Baku')
<Arrow [2019-01-24T20:26:31.146412+04:00]>
"""
if tzinfo is None:
tzinfo = dateutil_tz.tzlocal()
dt = dt_datetime.now(tzinfo)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
fold=getattr(dt, "fold", 0),
)
@classmethod
def utcnow(cls) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in UTC
time.
Usage::
>>> arrow.utcnow()
<Arrow [2019-01-24T16:31:40.651108+00:00]>
"""
dt = dt_datetime.now(dateutil_tz.tzutc())
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
fold=getattr(dt, "fold", 0),
)
@classmethod
def fromtimestamp(
cls,
timestamp: Union[int, float, str],
tzinfo: Optional[TZ_EXPR] = None,
) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, converted to
the given timezone.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
:param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
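        Usage::
            >>> arrow.Arrow.fromtimestamp(1565358758.0, tzinfo='US/Pacific')
            <Arrow [2019-08-09T06:52:38-07:00]>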
"""
if tzinfo is None:
tzinfo = dateutil_tz.tzlocal()
elif isinstance(tzinfo, str):
tzinfo = parser.TzinfoParser.parse(tzinfo)
if not util.is_timestamp(timestamp):
raise ValueError(f"The provided timestamp {timestamp!r} is invalid.")
timestamp = util.normalize_timestamp(float(timestamp))
dt = dt_datetime.fromtimestamp(timestamp, tzinfo)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
fold=getattr(dt, "fold", 0),
)
@classmethod
def utcfromtimestamp(cls, timestamp: Union[int, float, str]) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, in UTC time.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
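        Usage::
            >>> arrow.Arrow.utcfromtimestamp(1565358758.0)
            <Arrow [2019-08-09T13:52:38+00:00]>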
"""
if not util.is_timestamp(timestamp):
raise ValueError(f"The provided timestamp {timestamp!r} is invalid.")
timestamp = util.normalize_timestamp(float(timestamp))
dt = dt_datetime.utcfromtimestamp(timestamp)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dateutil_tz.tzutc(),
fold=getattr(dt, "fold", 0),
)
@classmethod
def fromdatetime(cls, dt: dt_datetime, tzinfo: Optional[TZ_EXPR] = None) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a ``datetime`` and
optional replacement timezone.
:param dt: the ``datetime``
:param tzinfo: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to ``dt``'s
timezone, or UTC if naive.
Usage::
>>> dt
datetime.datetime(2021, 4, 7, 13, 48, tzinfo=tzfile('/usr/share/zoneinfo/US/Pacific'))
>>> arrow.Arrow.fromdatetime(dt)
<Arrow [2021-04-07T13:48:00-07:00]>
"""
if tzinfo is None:
if dt.tzinfo is None:
tzinfo = dateutil_tz.tzutc()
else:
tzinfo = dt.tzinfo
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo,
fold=getattr(dt, "fold", 0),
)
@classmethod
def fromdate(cls, date: date, tzinfo: Optional[TZ_EXPR] = None) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a ``date`` and optional
replacement timezone. All time values are set to 0.
:param date: the ``date``
:param tzinfo: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to UTC.
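        Usage::
            >>> from datetime import date
            >>> arrow.Arrow.fromdate(date(2019, 2, 26), tzinfo='US/Pacific')
            <Arrow [2019-02-26T00:00:00-08:00]>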
"""
if tzinfo is None:
tzinfo = dateutil_tz.tzutc()
return cls(date.year, date.month, date.day, tzinfo=tzinfo)
@classmethod
def strptime(
cls, date_str: str, fmt: str, tzinfo: Optional[TZ_EXPR] = None
) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a date string and format,
in the style of ``datetime.strptime``. Optionally replaces the parsed timezone.
:param date_str: the date string.
:param fmt: the format string using datetime format codes.
:param tzinfo: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to the parsed
timezone if ``fmt`` contains a timezone directive, otherwise UTC.
Usage::
>>> arrow.Arrow.strptime('20-01-2019 15:49:10', '%d-%m-%Y %H:%M:%S')
<Arrow [2019-01-20T15:49:10+00:00]>
"""
dt = dt_datetime.strptime(date_str, fmt)
if tzinfo is None:
tzinfo = dt.tzinfo
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo,
fold=getattr(dt, "fold", 0),
)
@classmethod
def fromordinal(cls, ordinal: int) -> "Arrow":
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object corresponding
to the Gregorian Ordinal.
:param ordinal: an ``int`` corresponding to a Gregorian Ordinal.
Usage::
>>> arrow.fromordinal(737741)
<Arrow [2020-11-12T00:00:00+00:00]>
"""
util.validate_ordinal(ordinal)
dt = dt_datetime.fromordinal(ordinal)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
fold=getattr(dt, "fold", 0),
)
# factories: ranges and spans
@classmethod
def range(
cls,
frame: _T_FRAMES,
start: Union["Arrow", dt_datetime],
end: Union["Arrow", dt_datetime, None] = None,
tz: Optional[TZ_EXPR] = None,
limit: Optional[int] = None,
) -> Generator["Arrow", None, None]:
"""Returns an iterator of :class:`Arrow <arrow.arrow.Arrow>` objects, representing
points in time between two inputs.
:param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
:param start: A datetime expression, the start of the range.
:param end: (optional) A datetime expression, the end of the range.
:param tz: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to
``start``'s timezone, or UTC if ``start`` is naive.
:param limit: (optional) A maximum number of tuples to return.
**NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to
return the entire range. Call with ``limit`` alone to return a maximum # of results from
the start. Call with both to cap a range at a maximum # of results.
**NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before
iterating. As such, either call with naive objects and ``tz``, or aware objects from the
same timezone and no ``tz``.
Supported frame values: year, quarter, month, week, day, hour, minute, second, microsecond.
Recognized datetime expressions:
- An :class:`Arrow <arrow.arrow.Arrow>` object.
- A ``datetime`` object.
Usage::
>>> start = datetime(2013, 5, 5, 12, 30)
>>> end = datetime(2013, 5, 5, 17, 15)
>>> for r in arrow.Arrow.range('hour', start, end):
... print(repr(r))
...
<Arrow [2013-05-05T12:30:00+00:00]>
<Arrow [2013-05-05T13:30:00+00:00]>
<Arrow [2013-05-05T14:30:00+00:00]>
<Arrow [2013-05-05T15:30:00+00:00]>
<Arrow [2013-05-05T16:30:00+00:00]>
**NOTE**: Unlike Python's ``range``, ``end`` *may* be included in the returned iterator::
>>> start = datetime(2013, 5, 5, 12, 30)
>>> end = datetime(2013, 5, 5, 13, 30)
>>> for r in arrow.Arrow.range('hour', start, end):
... print(repr(r))
...
<Arrow [2013-05-05T12:30:00+00:00]>
<Arrow [2013-05-05T13:30:00+00:00]>
"""
_, frame_relative, relative_steps = cls._get_frames(frame)
tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)
start = cls._get_datetime(start).replace(tzinfo=tzinfo)
end, limit = cls._get_iteration_params(end, limit)
end = cls._get_datetime(end).replace(tzinfo=tzinfo)
current = cls.fromdatetime(start)
original_day = start.day
day_is_clipped = False
i = 0
while current <= end and i < limit:
i += 1
yield current
values = [getattr(current, f) for f in cls._ATTRS]
current = cls(*values, tzinfo=tzinfo).shift( # type: ignore
**{frame_relative: relative_steps}
)
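            # Shifting by a month/quarter/year frame can clip the day of the
            # month (e.g. Jan 31 plus one month lands on Feb 28); track that
            # and restore the original day once a later step no longer lands
            # on the last day of its month.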
if frame in ["month", "quarter", "year"] and current.day < original_day:
day_is_clipped = True
if day_is_clipped and not cls._is_last_day_of_month(current):
current = current.replace(day=original_day)
def span(
self,
frame: _T_FRAMES,
count: int = 1,
bounds: _BOUNDS = "[)",
exact: bool = False,
week_start: int = 1,
) -> Tuple["Arrow", "Arrow"]:
"""Returns a tuple of two new :class:`Arrow <arrow.arrow.Arrow>` objects, representing the timespan
of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
:param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
:param count: (optional) the number of frames to span.
:param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
whether to include or exclude the start and end values in the span. '(' excludes
the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
If the bounds are not specified, the default bound '[)' is used.
        :param exact: (optional) whether to have the timespan begin exactly at the time
            specified by this object, with its end exactly ``count`` frames later (subject
            to ``bounds``), instead of snapping both ends to the frame boundaries.
:param week_start: (optional) only used in combination with the week timeframe. Follows isoweekday() where
Monday is 1 and Sunday is 7.
Supported frame values: year, quarter, month, week, day, hour, minute, second.
Usage::
>>> arrow.utcnow()
<Arrow [2013-05-09T03:32:36.186203+00:00]>
>>> arrow.utcnow().span('hour')
(<Arrow [2013-05-09T03:00:00+00:00]>, <Arrow [2013-05-09T03:59:59.999999+00:00]>)
>>> arrow.utcnow().span('day')
(<Arrow [2013-05-09T00:00:00+00:00]>, <Arrow [2013-05-09T23:59:59.999999+00:00]>)
>>> arrow.utcnow().span('day', count=2)
(<Arrow [2013-05-09T00:00:00+00:00]>, <Arrow [2013-05-10T23:59:59.999999+00:00]>)
>>> arrow.utcnow().span('day', bounds='[]')
(<Arrow [2013-05-09T00:00:00+00:00]>, <Arrow [2013-05-10T00:00:00+00:00]>)
>>> arrow.utcnow().span('week')
(<Arrow [2021-02-22T00:00:00+00:00]>, <Arrow [2021-02-28T23:59:59.999999+00:00]>)
>>> arrow.utcnow().span('week', week_start=6)
(<Arrow [2021-02-20T00:00:00+00:00]>, <Arrow [2021-02-26T23:59:59.999999+00:00]>)
"""
if not 1 <= week_start <= 7:
raise ValueError("week_start argument must be between 1 and 7.")
util.validate_bounds(bounds)
frame_absolute, frame_relative, relative_steps = self._get_frames(frame)
if frame_absolute == "week":
attr = "day"
elif frame_absolute == "quarter":
attr = "month"
else:
attr = frame_absolute
floor = self
if not exact:
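            # Truncate to the start of the frame: keep the fields down to the
            # target attribute and let the finer ones fall back to their
            # defaults (missing date parts become 1, time parts become 0).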
index = self._ATTRS.index(attr)
frames = self._ATTRS[: index + 1]
values = [getattr(self, f) for f in frames]
for _ in range(3 - len(values)):
values.append(1)
floor = self.__class__(*values, tzinfo=self.tzinfo) # type: ignore
if frame_absolute == "week":
# if week_start is greater than self.isoweekday() go back one week by setting delta = 7
delta = 7 if week_start > self.isoweekday() else 0
floor = floor.shift(days=-(self.isoweekday() - week_start) - delta)
elif frame_absolute == "quarter":
floor = floor.shift(months=-((self.month - 1) % 3))
ceil = floor.shift(**{frame_relative: count * relative_steps})
if bounds[0] == "(":
floor = floor.shift(microseconds=+1)
if bounds[1] == ")":
ceil = ceil.shift(microseconds=-1)
return floor, ceil
def floor(self, frame: _T_FRAMES) -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, representing the "floor"
of the timespan of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
Equivalent to the first element in the 2-tuple returned by
:func:`span <arrow.arrow.Arrow.span>`.
:param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
Usage::
>>> arrow.utcnow().floor('hour')
<Arrow [2013-05-09T03:00:00+00:00]>
"""
return self.span(frame)[0]
def ceil(self, frame: _T_FRAMES) -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, representing the "ceiling"
of the timespan of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
Equivalent to the second element in the 2-tuple returned by
:func:`span <arrow.arrow.Arrow.span>`.
:param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...).
Usage::
>>> arrow.utcnow().ceil('hour')
<Arrow [2013-05-09T03:59:59.999999+00:00]>
"""
return self.span(frame)[1]
@classmethod
def span_range(
cls,
frame: _T_FRAMES,
start: dt_datetime,
end: dt_datetime,
tz: Optional[TZ_EXPR] = None,
limit: Optional[int] = None,
bounds: _BOUNDS = "[)",
exact: bool = False,
) -> Iterable[Tuple["Arrow", "Arrow"]]:
"""Returns an iterator of tuples, each :class:`Arrow <arrow.arrow.Arrow>` objects,
representing a series of timespans between two inputs.
:param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
:param start: A datetime expression, the start of the range.
:param end: (optional) A datetime expression, the end of the range.
:param tz: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to
``start``'s timezone, or UTC if ``start`` is naive.
:param limit: (optional) A maximum number of tuples to return.
:param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
whether to include or exclude the start and end values in each span in the range. '(' excludes
the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
If the bounds are not specified, the default bound '[)' is used.
:param exact: (optional) whether to have the first timespan start exactly
at the time specified by ``start`` and the final span truncated
so as not to extend beyond ``end``.
**NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to
return the entire range. Call with ``limit`` alone to return a maximum # of results from
the start. Call with both to cap a range at a maximum # of results.
**NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before
iterating. As such, either call with naive objects and ``tz``, or aware objects from the
same timezone and no ``tz``.
Supported frame values: year, quarter, month, week, day, hour, minute, second, microsecond.
Recognized datetime expressions:
- An :class:`Arrow <arrow.arrow.Arrow>` object.
- A ``datetime`` object.
**NOTE**: Unlike Python's ``range``, ``end`` will *always* be included in the returned
iterator of timespans.
        Usage::
>>> start = datetime(2013, 5, 5, 12, 30)
>>> end = datetime(2013, 5, 5, 17, 15)
>>> for r in arrow.Arrow.span_range('hour', start, end):
... print(r)
...
(<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T12:59:59.999999+00:00]>)
(<Arrow [2013-05-05T13:00:00+00:00]>, <Arrow [2013-05-05T13:59:59.999999+00:00]>)
(<Arrow [2013-05-05T14:00:00+00:00]>, <Arrow [2013-05-05T14:59:59.999999+00:00]>)
(<Arrow [2013-05-05T15:00:00+00:00]>, <Arrow [2013-05-05T15:59:59.999999+00:00]>)
(<Arrow [2013-05-05T16:00:00+00:00]>, <Arrow [2013-05-05T16:59:59.999999+00:00]>)
(<Arrow [2013-05-05T17:00:00+00:00]>, <Arrow [2013-05-05T17:59:59.999999+00:00]>)
"""
tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)
start = cls.fromdatetime(start, tzinfo).span(frame, exact=exact)[0]
end = cls.fromdatetime(end, tzinfo)
_range = cls.range(frame, start, end, tz, limit)
if not exact:
for r in _range:
yield r.span(frame, bounds=bounds, exact=exact)
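        # When ``exact`` is False the generator above has already been exhausted,
        # so the loop below yields nothing; it only runs for exact spans, where
        # the final span must be truncated so it does not extend beyond ``end``.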
for r in _range:
floor, ceil = r.span(frame, bounds=bounds, exact=exact)
if ceil > end:
ceil = end
if bounds[1] == ")":
ceil += relativedelta(microseconds=-1)
if floor == end:
break
elif floor + relativedelta(microseconds=-1) == end:
break
yield floor, ceil
@classmethod
def interval(
cls,
frame: _T_FRAMES,
start: dt_datetime,
end: dt_datetime,
interval: int = 1,
tz: Optional[TZ_EXPR] = None,
bounds: _BOUNDS = "[)",
exact: bool = False,
) -> Iterable[Tuple["Arrow", "Arrow"]]:
"""Returns an iterator of tuples, each :class:`Arrow <arrow.arrow.Arrow>` objects,
representing a series of intervals between two inputs.
:param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...).
:param start: A datetime expression, the start of the range.
:param end: (optional) A datetime expression, the end of the range.
:param interval: (optional) Time interval for the given time frame.
:param tz: (optional) A timezone expression. Defaults to UTC.
:param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
whether to include or exclude the start and end values in the intervals. '(' excludes
the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
If the bounds are not specified, the default bound '[)' is used.
:param exact: (optional) whether to have the first timespan start exactly
at the time specified by ``start`` and the final interval truncated
so as not to extend beyond ``end``.
Supported frame values: year, quarter, month, week, day, hour, minute, second
Recognized datetime expressions:
- An :class:`Arrow <arrow.arrow.Arrow>` object.
- A ``datetime`` object.
Recognized timezone expressions:
- A ``tzinfo`` object.
- A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
- A ``str`` in ISO 8601 style, as in '+07:00'.
- A ``str``, one of the following: 'local', 'utc', 'UTC'.
Usage:
>>> start = datetime(2013, 5, 5, 12, 30)
>>> end = datetime(2013, 5, 5, 17, 15)
>>> for r in arrow.Arrow.interval('hour', start, end, 2):
            ...     print(r)
...
(<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T13:59:59.999999+00:00]>)
(<Arrow [2013-05-05T14:00:00+00:00]>, <Arrow [2013-05-05T15:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T16:00:00+00:00]>, <Arrow [2013-05-05T17:59:59.999999+00:00]>)
"""
if interval < 1:
raise ValueError("interval has to be a positive integer")
spanRange = iter(
cls.span_range(frame, start, end, tz, bounds=bounds, exact=exact)
)
while True:
try:
intvlStart, intvlEnd = next(spanRange)
for _ in range(interval - 1):
try:
_, intvlEnd = next(spanRange)
except StopIteration:
continue
yield intvlStart, intvlEnd
except StopIteration:
return
# representations
def __repr__(self) -> str:
return f"<{self.__class__.__name__} [{self.__str__()}]>"
def __str__(self) -> str:
return self._datetime.isoformat()
def __format__(self, formatstr: str) -> str:
if len(formatstr) > 0:
return self.format(formatstr)
return str(self)
def __hash__(self) -> int:
return self._datetime.__hash__()
# attributes and properties
def __getattr__(self, name: str) -> int:
if name == "week":
return self.isocalendar()[1]
if name == "quarter":
return int((self.month - 1) / self._MONTHS_PER_QUARTER) + 1
if not name.startswith("_"):
value: Optional[int] = getattr(self._datetime, name, None)
if value is not None:
return value
return cast(int, object.__getattribute__(self, name))
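    # Sketch of the derived attributes resolved above (added, illustrative only):
    #   >>> arrow.Arrow(2013, 5, 5).quarter    # int((5 - 1) / 3) + 1
    #   2
    #   >>> arrow.Arrow(2013, 5, 5).week       # ISO calendar week number
    #   18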
@property
def tzinfo(self) -> dt_tzinfo:
"""Gets the ``tzinfo`` of the :class:`Arrow <arrow.arrow.Arrow>` object.
Usage::
>>> arw=arrow.utcnow()
>>> arw.tzinfo
tzutc()
"""
# In Arrow, `_datetime` cannot be naive.
return cast(dt_tzinfo, self._datetime.tzinfo)
@property
def datetime(self) -> dt_datetime:
"""Returns a datetime representation of the :class:`Arrow <arrow.arrow.Arrow>` object.
Usage::
>>> arw=arrow.utcnow()
>>> arw.datetime
datetime.datetime(2019, 1, 24, 16, 35, 27, 276649, tzinfo=tzutc())
"""
return self._datetime
@property
def naive(self) -> dt_datetime:
"""Returns a naive datetime representation of the :class:`Arrow <arrow.arrow.Arrow>`
object.
Usage::
>>> nairobi = arrow.now('Africa/Nairobi')
>>> nairobi
<Arrow [2019-01-23T19:27:12.297999+03:00]>
>>> nairobi.naive
datetime.datetime(2019, 1, 23, 19, 27, 12, 297999)
"""
return self._datetime.replace(tzinfo=None)
def timestamp(self) -> float:
"""Returns a timestamp representation of the :class:`Arrow <arrow.arrow.Arrow>` object, in
UTC time.
Usage::
>>> arrow.utcnow().timestamp()
1616882340.256501
"""
return self._datetime.timestamp()
@property
def int_timestamp(self) -> int:
"""Returns an integer timestamp representation of the :class:`Arrow <arrow.arrow.Arrow>` object, in
UTC time.
Usage::
>>> arrow.utcnow().int_timestamp
1548260567
"""
return int(self.timestamp())
@property
def float_timestamp(self) -> float:
"""Returns a floating-point timestamp representation of the :class:`Arrow <arrow.arrow.Arrow>`
object, in UTC time.
Usage::
>>> arrow.utcnow().float_timestamp
1548260516.830896
"""
return self.timestamp()
@property
def fold(self) -> int:
"""Returns the ``fold`` value of the :class:`Arrow <arrow.arrow.Arrow>` object."""
return self._datetime.fold
@property
def ambiguous(self) -> bool:
"""Indicates whether the :class:`Arrow <arrow.arrow.Arrow>` object is a repeated wall time in the current
timezone.
"""
return dateutil_tz.datetime_ambiguous(self._datetime)
@property
def imaginary(self) -> bool:
"""Indicates whether the :class: `Arrow <arrow.arrow.Arrow>` object exists in the current timezone."""
return not dateutil_tz.datetime_exists(self._datetime)
# mutation and duplication.
def clone(self) -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, cloned from the current one.
Usage:
>>> arw = arrow.utcnow()
>>> cloned = arw.clone()
"""
return self.fromdatetime(self._datetime)
def replace(self, **kwargs: Any) -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object with attributes updated
according to inputs.
Use property names to set their value absolutely::
>>> import arrow
>>> arw = arrow.utcnow()
>>> arw
<Arrow [2013-05-11T22:27:34.787885+00:00]>
>>> arw.replace(year=2014, month=6)
<Arrow [2014-06-11T22:27:34.787885+00:00]>
You can also replace the timezone without conversion, using a
:ref:`timezone expression <tz-expr>`::
>>> arw.replace(tzinfo=tz.tzlocal())
<Arrow [2013-05-11T22:27:34.787885-07:00]>
"""
absolute_kwargs = {}
for key, value in kwargs.items():
if key in self._ATTRS:
absolute_kwargs[key] = value
elif key in ["week", "quarter"]:
raise ValueError(f"Setting absolute {key} is not supported.")
elif key not in ["tzinfo", "fold"]:
raise ValueError(f"Unknown attribute: {key!r}.")
current = self._datetime.replace(**absolute_kwargs)
tzinfo = kwargs.get("tzinfo")
if tzinfo is not None:
tzinfo = self._get_tzinfo(tzinfo)
current = current.replace(tzinfo=tzinfo)
fold = kwargs.get("fold")
if fold is not None:
current = current.replace(fold=fold)
return self.fromdatetime(current)
def shift(self, **kwargs: Any) -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object with attributes updated
according to inputs.
Use pluralized property names to relatively shift their current value:
>>> import arrow
>>> arw = arrow.utcnow()
>>> arw
<Arrow [2013-05-11T22:27:34.787885+00:00]>
>>> arw.shift(years=1, months=-1)
<Arrow [2014-04-11T22:27:34.787885+00:00]>
Day-of-the-week relative shifting can use either Python's weekday numbers
(Monday = 0, Tuesday = 1 .. Sunday = 6) or using dateutil.relativedelta's
day instances (MO, TU .. SU). When using weekday numbers, the returned
date will always be greater than or equal to the starting date.
Using the above code (which is a Saturday) and asking it to shift to Saturday:
>>> arw.shift(weekday=5)
<Arrow [2013-05-11T22:27:34.787885+00:00]>
While asking for a Monday:
>>> arw.shift(weekday=0)
<Arrow [2013-05-13T22:27:34.787885+00:00]>
"""
relative_kwargs = {}
additional_attrs = ["weeks", "quarters", "weekday"]
for key, value in kwargs.items():
if key in self._ATTRS_PLURAL or key in additional_attrs:
relative_kwargs[key] = value
else:
supported_attr = ", ".join(self._ATTRS_PLURAL + additional_attrs)
raise ValueError(
f"Invalid shift time frame. Please select one of the following: {supported_attr}."
)
# core datetime does not support quarters, translate to months.
relative_kwargs.setdefault("months", 0)
relative_kwargs["months"] += (
relative_kwargs.pop("quarters", 0) * self._MONTHS_PER_QUARTER
)
current = self._datetime + relativedelta(**relative_kwargs)
if not dateutil_tz.datetime_exists(current):
current = dateutil_tz.resolve_imaginary(current)
return self.fromdatetime(current)
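    # Illustrative note (added): because core datetime has no quarter arithmetic,
    # the translation above makes
    #   >>> arrow.get('2013-05-11').shift(quarters=1)
    # equivalent to
    #   >>> arrow.get('2013-05-11').shift(months=3)    # -> <Arrow [2013-08-11T00:00:00+00:00]>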
def to(self, tz: TZ_EXPR) -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, converted
to the target timezone.
:param tz: A :ref:`timezone expression <tz-expr>`.
Usage::
>>> utc = arrow.utcnow()
>>> utc
<Arrow [2013-05-09T03:49:12.311072+00:00]>
>>> utc.to('US/Pacific')
<Arrow [2013-05-08T20:49:12.311072-07:00]>
>>> utc.to(tz.tzlocal())
<Arrow [2013-05-08T20:49:12.311072-07:00]>
>>> utc.to('-07:00')
<Arrow [2013-05-08T20:49:12.311072-07:00]>
>>> utc.to('local')
<Arrow [2013-05-08T20:49:12.311072-07:00]>
>>> utc.to('local').to('utc')
<Arrow [2013-05-09T03:49:12.311072+00:00]>
"""
if not isinstance(tz, dt_tzinfo):
tz = parser.TzinfoParser.parse(tz)
dt = self._datetime.astimezone(tz)
return self.__class__(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
fold=getattr(dt, "fold", 0),
)
# string output and formatting
def format(
self, fmt: str = "YYYY-MM-DD HH:mm:ssZZ", locale: str = DEFAULT_LOCALE
) -> str:
"""Returns a string representation of the :class:`Arrow <arrow.arrow.Arrow>` object,
formatted according to the provided format string.
:param fmt: the format string.
:param locale: the locale to format.
Usage::
>>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ')
'2013-05-09 03:56:47 -00:00'
>>> arrow.utcnow().format('X')
'1368071882'
>>> arrow.utcnow().format('MMMM DD, YYYY')
'May 09, 2013'
>>> arrow.utcnow().format()
'2013-05-09 03:56:47 -00:00'
"""
return formatter.DateTimeFormatter(locale).format(self._datetime, fmt)
def humanize(
self,
other: Union["Arrow", dt_datetime, None] = None,
locale: str = DEFAULT_LOCALE,
only_distance: bool = False,
granularity: Union[_GRANULARITY, List[_GRANULARITY]] = "auto",
) -> str:
"""Returns a localized, humanized representation of a relative difference in time.
:param other: (optional) an :class:`Arrow <arrow.arrow.Arrow>` or ``datetime`` object.
Defaults to now in the current :class:`Arrow <arrow.arrow.Arrow>` object's timezone.
:param locale: (optional) a ``str`` specifying a locale. Defaults to 'en-us'.
:param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part.
:param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute',
'hour', 'day', 'week', 'month' or 'year' or a list of any combination of these strings
Usage::
>>> earlier = arrow.utcnow().shift(hours=-2)
>>> earlier.humanize()
'2 hours ago'
>>> later = earlier.shift(hours=4)
>>> later.humanize(earlier)
'in 4 hours'
"""
locale_name = locale
locale = locales.get_locale(locale)
if other is None:
utc = dt_datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
dt = utc.astimezone(self._datetime.tzinfo)
elif isinstance(other, Arrow):
dt = other._datetime
elif isinstance(other, dt_datetime):
if other.tzinfo is None:
dt = other.replace(tzinfo=self._datetime.tzinfo)
else:
dt = other.astimezone(self._datetime.tzinfo)
else:
raise TypeError(
f"Invalid 'other' argument of type {type(other).__name__!r}. "
"Argument must be of type None, Arrow, or datetime."
)
if isinstance(granularity, list) and len(granularity) == 1:
granularity = granularity[0]
_delta = int(round((self._datetime - dt).total_seconds()))
sign = -1 if _delta < 0 else 1
delta_second = diff = abs(_delta)
try:
if granularity == "auto":
if diff < 10:
return locale.describe("now", only_distance=only_distance)
if diff < self._SECS_PER_MINUTE:
seconds = sign * delta_second
return locale.describe(
"seconds", seconds, only_distance=only_distance
)
elif diff < self._SECS_PER_MINUTE * 2:
return locale.describe("minute", sign, only_distance=only_distance)
elif diff < self._SECS_PER_HOUR:
minutes = sign * max(delta_second // self._SECS_PER_MINUTE, 2)
return locale.describe(
"minutes", minutes, only_distance=only_distance
)
elif diff < self._SECS_PER_HOUR * 2:
return locale.describe("hour", sign, only_distance=only_distance)
elif diff < self._SECS_PER_DAY:
hours = sign * max(delta_second // self._SECS_PER_HOUR, 2)
return locale.describe("hours", hours, only_distance=only_distance)
elif diff < self._SECS_PER_DAY * 2:
return locale.describe("day", sign, only_distance=only_distance)
elif diff < self._SECS_PER_WEEK:
days = sign * max(delta_second // self._SECS_PER_DAY, 2)
return locale.describe("days", days, only_distance=only_distance)
elif diff < self._SECS_PER_WEEK * 2:
return locale.describe("week", sign, only_distance=only_distance)
elif diff < self._SECS_PER_MONTH:
weeks = sign * max(delta_second // self._SECS_PER_WEEK, 2)
return locale.describe("weeks", weeks, only_distance=only_distance)
elif diff < self._SECS_PER_MONTH * 2:
return locale.describe("month", sign, only_distance=only_distance)
elif diff < self._SECS_PER_YEAR:
# TODO revisit for humanization during leap years
self_months = self._datetime.year * 12 + self._datetime.month
other_months = dt.year * 12 + dt.month
months = sign * max(abs(other_months - self_months), 2)
return locale.describe(
"months", months, only_distance=only_distance
)
elif diff < self._SECS_PER_YEAR * 2:
return locale.describe("year", sign, only_distance=only_distance)
else:
years = sign * max(delta_second // self._SECS_PER_YEAR, 2)
return locale.describe("years", years, only_distance=only_distance)
elif isinstance(granularity, str):
granularity = cast(TimeFrameLiteral, granularity) # type: ignore[assignment]
if granularity == "second":
delta = sign * float(delta_second)
if abs(delta) < 2:
return locale.describe("now", only_distance=only_distance)
elif granularity == "minute":
delta = sign * delta_second / self._SECS_PER_MINUTE
elif granularity == "hour":
delta = sign * delta_second / self._SECS_PER_HOUR
elif granularity == "day":
delta = sign * delta_second / self._SECS_PER_DAY
elif granularity == "week":
delta = sign * delta_second / self._SECS_PER_WEEK
elif granularity == "month":
delta = sign * delta_second / self._SECS_PER_MONTH
elif granularity == "year":
delta = sign * delta_second / self._SECS_PER_YEAR
else:
raise ValueError(
"Invalid level of granularity. "
"Please select between 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'."
)
if trunc(abs(delta)) != 1:
granularity += "s" # type: ignore
return locale.describe(granularity, delta, only_distance=only_distance)
else:
timeframes: List[Tuple[TimeFrameLiteral, float]] = []
def gather_timeframes(_delta: float, _frame: TimeFrameLiteral) -> float:
if _frame in granularity:
value = sign * _delta / self._SECS_MAP[_frame]
_delta %= self._SECS_MAP[_frame]
if trunc(abs(value)) != 1:
timeframes.append(
(cast(TimeFrameLiteral, _frame + "s"), value)
)
else:
timeframes.append((_frame, value))
return _delta
delta = float(delta_second)
frames: Tuple[TimeFrameLiteral, ...] = (
"year",
"month",
"week",
"day",
"hour",
"minute",
"second",
)
for frame in frames:
delta = gather_timeframes(delta, frame)
if len(timeframes) < len(granularity):
raise ValueError(
"Invalid level of granularity. "
"Please select between 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'."
)
return locale.describe_multi(timeframes, only_distance=only_distance)
except KeyError as e:
raise ValueError(
f"Humanization of the {e} granularity is not currently translated in the {locale_name!r} locale. "
"Please consider making a contribution to this locale."
)
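    # A minimal sketch of the granularity handling above (added, illustrative only):
    #   >>> earlier = arrow.utcnow().shift(hours=-2, minutes=-5)
    #   >>> arrow.utcnow().humanize(earlier, granularity=["hour", "minute"])
    # yields roughly 'in 2 hours and 5 minutes' (the exact wording depends on the
    # locale's describe_multi); a one-element list collapses to the plain string form.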
def dehumanize(self, timestring: str, locale: str = "en_us") -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, that represents
        the time difference relative to the attributes of the
:class:`Arrow <arrow.arrow.Arrow>` object.
:param timestring: a ``str`` representing a humanized relative time.
:param locale: (optional) a ``str`` specifying a locale. Defaults to 'en-us'.
Usage::
>>> arw = arrow.utcnow()
>>> arw
<Arrow [2021-04-20T22:27:34.787885+00:00]>
>>> earlier = arw.dehumanize("2 days ago")
>>> earlier
<Arrow [2021-04-18T22:27:34.787885+00:00]>
>>> arw = arrow.utcnow()
>>> arw
<Arrow [2021-04-20T22:27:34.787885+00:00]>
>>> later = arw.dehumanize("in a month")
>>> later
<Arrow [2021-05-18T22:27:34.787885+00:00]>
"""
        # Create a locale object based on the given locale string
locale_obj = locales.get_locale(locale)
# Check to see if locale is supported
normalized_locale_name = locale.lower().replace("_", "-")
if normalized_locale_name not in DEHUMANIZE_LOCALES:
raise ValueError(
f"Dehumanize does not currently support the {locale} locale, please consider making a contribution to add support for this locale."
)
current_time = self.fromdatetime(self._datetime)
# Create an object containing the relative time info
time_object_info = dict.fromkeys(
["seconds", "minutes", "hours", "days", "weeks", "months", "years"], 0
)
# Create an object representing if unit has been seen
unit_visited = dict.fromkeys(
["now", "seconds", "minutes", "hours", "days", "weeks", "months", "years"],
False,
)
# Create a regex pattern object for numbers
num_pattern = re.compile(r"\d+")
# Search timestring for each time unit within locale
for unit in locale_obj.timeframes:
# Numeric unit of change
change_value = 0
# Replace {0} with regex \d representing digits
search_string = str(locale_obj.timeframes[unit])
search_string = search_string.format(r"\d+")
# Create search pattern and find within string
pattern = re.compile(fr"{search_string}")
match = pattern.search(timestring)
# If there is no match continue to next iteration
if not match:
continue
match_string = match.group()
num_match = num_pattern.search(match_string)
# If no number matches set change value to be one
if not num_match:
change_value = 1
else:
change_value = int(num_match.group())
# No time to update if now is the unit
if unit == "now":
unit_visited[unit] = True
continue
            # Add the change value to the correct unit (handles the plural forms used in timeframes, e.g. "second" vs. "seconds")
time_unit_to_change = str(unit)
time_unit_to_change += "s" if (str(time_unit_to_change)[-1] != "s") else ""
time_object_info[time_unit_to_change] = change_value
unit_visited[time_unit_to_change] = True
# Assert error if string does not modify any units
if not any([True for k, v in unit_visited.items() if v]):
raise ValueError(
"Input string not valid. Note: Some locales do not support the week granulairty in Arrow. "
"If you are attempting to use the week granularity on an unsupported locale, this could be the cause of this error."
)
# Sign logic
future_string = locale_obj.future
future_string = future_string.format(".*")
future_pattern = re.compile(fr"^{future_string}$")
future_pattern_match = future_pattern.findall(timestring)
past_string = locale_obj.past
past_string = past_string.format(".*")
past_pattern = re.compile(fr"^{past_string}$")
past_pattern_match = past_pattern.findall(timestring)
# If a string contains the now unit, there will be no relative units, hence the need to check if the now unit
# was visited before raising a ValueError
if past_pattern_match:
sign_val = -1
elif future_pattern_match:
sign_val = 1
elif unit_visited["now"]:
sign_val = 0
else:
raise ValueError(
"Invalid input String. String does not contain any relative time information. "
"String should either represent a time in the future or a time in the past. "
"Ex: 'in 5 seconds' or '5 seconds ago'."
)
time_changes = {k: sign_val * v for k, v in time_object_info.items()}
return current_time.shift(**time_changes)
# query functions
def is_between(
self,
start: "Arrow",
end: "Arrow",
bounds: _BOUNDS = "()",
) -> bool:
"""Returns a boolean denoting whether the :class:`Arrow <arrow.arrow.Arrow>` object is between
the start and end limits.
:param start: an :class:`Arrow <arrow.arrow.Arrow>` object.
:param end: an :class:`Arrow <arrow.arrow.Arrow>` object.
:param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies
whether to include or exclude the start and end values in the range. '(' excludes
the start, '[' includes the start, ')' excludes the end, and ']' includes the end.
If the bounds are not specified, the default bound '()' is used.
Usage::
>>> start = arrow.get(datetime(2013, 5, 5, 12, 30, 10))
>>> end = arrow.get(datetime(2013, 5, 5, 12, 30, 36))
>>> arrow.get(datetime(2013, 5, 5, 12, 30, 27)).is_between(start, end)
True
>>> start = arrow.get(datetime(2013, 5, 5))
>>> end = arrow.get(datetime(2013, 5, 8))
>>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[]')
True
>>> start = arrow.get(datetime(2013, 5, 5))
>>> end = arrow.get(datetime(2013, 5, 8))
>>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[)')
False
"""
util.validate_bounds(bounds)
if not isinstance(start, Arrow):
raise TypeError(
f"Cannot parse start date argument type of {type(start)!r}."
)
if not isinstance(end, Arrow):
raise TypeError(f"Cannot parse end date argument type of {type(start)!r}.")
include_start = bounds[0] == "["
include_end = bounds[1] == "]"
target_ts = self.float_timestamp
start_ts = start.float_timestamp
end_ts = end.float_timestamp
return (
(start_ts <= target_ts <= end_ts)
and (include_start or start_ts < target_ts)
and (include_end or target_ts < end_ts)
)
# datetime methods
def date(self) -> date:
"""Returns a ``date`` object with the same year, month and day.
Usage::
>>> arrow.utcnow().date()
datetime.date(2019, 1, 23)
"""
return self._datetime.date()
def time(self) -> dt_time:
"""Returns a ``time`` object with the same hour, minute, second, microsecond.
Usage::
>>> arrow.utcnow().time()
datetime.time(12, 15, 34, 68352)
"""
return self._datetime.time()
def timetz(self) -> dt_time:
"""Returns a ``time`` object with the same hour, minute, second, microsecond and
tzinfo.
Usage::
>>> arrow.utcnow().timetz()
datetime.time(12, 5, 18, 298893, tzinfo=tzutc())
"""
return self._datetime.timetz()
def astimezone(self, tz: Optional[dt_tzinfo]) -> dt_datetime:
"""Returns a ``datetime`` object, converted to the specified timezone.
:param tz: a ``tzinfo`` object.
Usage::
>>> pacific=arrow.now('US/Pacific')
>>> nyc=arrow.now('America/New_York').tzinfo
>>> pacific.astimezone(nyc)
datetime.datetime(2019, 1, 20, 10, 24, 22, 328172, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
"""
return self._datetime.astimezone(tz)
def utcoffset(self) -> Optional[timedelta]:
"""Returns a ``timedelta`` object representing the whole number of minutes difference from
UTC time.
Usage::
>>> arrow.now('US/Pacific').utcoffset()
datetime.timedelta(-1, 57600)
"""
return self._datetime.utcoffset()
def dst(self) -> Optional[timedelta]:
"""Returns the daylight savings time adjustment.
Usage::
>>> arrow.utcnow().dst()
datetime.timedelta(0)
"""
return self._datetime.dst()
def timetuple(self) -> struct_time:
"""Returns a ``time.struct_time``, in the current timezone.
Usage::
>>> arrow.utcnow().timetuple()
time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0)
"""
return self._datetime.timetuple()
def utctimetuple(self) -> struct_time:
"""Returns a ``time.struct_time``, in UTC time.
Usage::
>>> arrow.utcnow().utctimetuple()
time.struct_time(tm_year=2019, tm_mon=1, tm_mday=19, tm_hour=21, tm_min=41, tm_sec=7, tm_wday=5, tm_yday=19, tm_isdst=0)
"""
return self._datetime.utctimetuple()
def toordinal(self) -> int:
"""Returns the proleptic Gregorian ordinal of the date.
Usage::
>>> arrow.utcnow().toordinal()
737078
"""
return self._datetime.toordinal()
def weekday(self) -> int:
"""Returns the day of the week as an integer (0-6).
Usage::
>>> arrow.utcnow().weekday()
5
"""
return self._datetime.weekday()
def isoweekday(self) -> int:
"""Returns the ISO day of the week as an integer (1-7).
Usage::
>>> arrow.utcnow().isoweekday()
6
"""
return self._datetime.isoweekday()
def isocalendar(self) -> Tuple[int, int, int]:
"""Returns a 3-tuple, (ISO year, ISO week number, ISO weekday).
Usage::
>>> arrow.utcnow().isocalendar()
(2019, 3, 6)
"""
return self._datetime.isocalendar()
def isoformat(self, sep: str = "T", timespec: str = "auto") -> str:
"""Returns an ISO 8601 formatted representation of the date and time.
Usage::
>>> arrow.utcnow().isoformat()
'2019-01-19T18:30:52.442118+00:00'
"""
return self._datetime.isoformat(sep, timespec)
def ctime(self) -> str:
"""Returns a ctime formatted representation of the date and time.
Usage::
>>> arrow.utcnow().ctime()
'Sat Jan 19 18:26:50 2019'
"""
return self._datetime.ctime()
def strftime(self, format: str) -> str:
"""Formats in the style of ``datetime.strftime``.
:param format: the format string.
Usage::
>>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S')
'23-01-2019 12:28:17'
"""
return self._datetime.strftime(format)
def for_json(self) -> str:
"""Serializes for the ``for_json`` protocol of simplejson.
Usage::
>>> arrow.utcnow().for_json()
'2019-01-19T18:25:36.760079+00:00'
"""
return self.isoformat()
# math
def __add__(self, other: Any) -> "Arrow":
if isinstance(other, (timedelta, relativedelta)):
return self.fromdatetime(self._datetime + other, self._datetime.tzinfo)
return NotImplemented
def __radd__(self, other: Union[timedelta, relativedelta]) -> "Arrow":
return self.__add__(other)
@overload
def __sub__(self, other: Union[timedelta, relativedelta]) -> "Arrow":
pass # pragma: no cover
@overload
def __sub__(self, other: Union[dt_datetime, "Arrow"]) -> timedelta:
pass # pragma: no cover
def __sub__(self, other: Any) -> Union[timedelta, "Arrow"]:
if isinstance(other, (timedelta, relativedelta)):
return self.fromdatetime(self._datetime - other, self._datetime.tzinfo)
elif isinstance(other, dt_datetime):
return self._datetime - other
elif isinstance(other, Arrow):
return self._datetime - other._datetime
return NotImplemented
def __rsub__(self, other: Any) -> timedelta:
if isinstance(other, dt_datetime):
return other - self._datetime
return NotImplemented
# comparisons
def __eq__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)):
return False
return self._datetime == self._get_datetime(other)
def __ne__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)):
return True
return not self.__eq__(other)
def __gt__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented
return self._datetime > self._get_datetime(other)
def __ge__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented
return self._datetime >= self._get_datetime(other)
def __lt__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented
return self._datetime < self._get_datetime(other)
def __le__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented
return self._datetime <= self._get_datetime(other)
# internal methods
@staticmethod
def _get_tzinfo(tz_expr: Optional[TZ_EXPR]) -> dt_tzinfo:
"""Get normalized tzinfo object from various inputs."""
if tz_expr is None:
return dateutil_tz.tzutc()
if isinstance(tz_expr, dt_tzinfo):
return tz_expr
else:
try:
return parser.TzinfoParser.parse(tz_expr)
except parser.ParserError:
raise ValueError(f"{tz_expr!r} not recognized as a timezone.")
@classmethod
def _get_datetime(
cls, expr: Union["Arrow", dt_datetime, int, float, str]
) -> dt_datetime:
"""Get datetime object from a specified expression."""
if isinstance(expr, Arrow):
return expr.datetime
elif isinstance(expr, dt_datetime):
return expr
elif util.is_timestamp(expr):
timestamp = float(expr)
return cls.utcfromtimestamp(timestamp).datetime
else:
raise ValueError(f"{expr!r} not recognized as a datetime or timestamp.")
@classmethod
def _get_frames(cls, name: _T_FRAMES) -> Tuple[str, str, int]:
"""Finds relevant timeframe and steps for use in range and span methods.
Returns a 3 element tuple in the form (frame, plural frame, step), for example ("day", "days", 1)
"""
if name in cls._ATTRS:
return name, f"{name}s", 1
elif name[-1] == "s" and name[:-1] in cls._ATTRS:
return name[:-1], name, 1
elif name in ["week", "weeks"]:
return "week", "weeks", 1
elif name in ["quarter", "quarters"]:
return "quarter", "months", 3
else:
supported = ", ".join(
[
"year(s)",
"month(s)",
"day(s)",
"hour(s)",
"minute(s)",
"second(s)",
"microsecond(s)",
"week(s)",
"quarter(s)",
]
)
raise ValueError(
f"Range or span over frame {name} not supported. Supported frames: {supported}."
)
@classmethod
def _get_iteration_params(cls, end: Any, limit: Optional[int]) -> Tuple[Any, int]:
"""Sets default end and limit values for range method."""
if end is None:
if limit is None:
raise ValueError("One of 'end' or 'limit' is required.")
return cls.max, limit
else:
if limit is None:
return end, sys.maxsize
return end, limit
@staticmethod
def _is_last_day_of_month(date: "Arrow") -> bool:
"""Returns a boolean indicating whether the datetime is the last day of the month."""
return date.day == calendar.monthrange(date.year, date.month)[1]
Arrow.min = Arrow.fromdatetime(dt_datetime.min)
Arrow.max = Arrow.fromdatetime(dt_datetime.max)
| apache-2.0 | 1,175,093,104,816,348,200 | 32.411828 | 147 | 0.545828 | false |
oourfali/cloud-init-fedora | cloudinit/DataSourceOVF.py | 2 | 9618 | # vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Hafliger <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.DataSource as DataSource
from cloudinit import seeddir as base_seeddir
from cloudinit import log
import cloudinit.util as util
import os.path
import os
from xml.dom import minidom
import base64
import re
import tempfile
import subprocess
class DataSourceOVF(DataSource.DataSource):
seed = None
seeddir = base_seeddir + '/ovf'
environment = None
cfg = {}
userdata_raw = None
metadata = None
supported_seed_starts = ("/", "file://")
def __str__(self):
mstr = "DataSourceOVF"
mstr = mstr + " [seed=%s]" % self.seed
return(mstr)
def get_data(self):
found = []
md = {}
ud = ""
defaults = {
"instance-id": "iid-dsovf"
}
(seedfile, contents) = get_ovf_env(base_seeddir)
if seedfile:
# found a seed dir
seed = "%s/%s" % (base_seeddir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
else:
np = {'iso': transport_iso9660,
'vmware-guestd': transport_vmware_guestd, }
name = None
for name, transfunc in np.iteritems():
(contents, _dev, _fname) = transfunc()
if contents:
break
if contents:
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(name)
        # No OVF transports were found
if len(found) == 0:
return False
if 'seedfrom' in md and md['seedfrom']:
seedfrom = md['seedfrom']
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
seedfound = proto
break
if not seedfound:
log.debug("seed from %s not supported by %s" %
(seedfrom, self.__class__))
return False
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
log.debug("using seeded cache data from %s" % seedfrom)
md = util.mergedict(md, md_seed)
found.append(seedfrom)
md = util.mergedict(md, defaults)
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
self.cfg = cfg
return True
def get_public_ssh_keys(self):
if not 'public-keys' in self.metadata:
return([])
return([self.metadata['public-keys'], ])
    # the data source's config_obj is a cloud-config formatted
    # object that came to it by means other than cloud-config,
    # because cloud-config content would be handled elsewhere
def get_config_obj(self):
return(self.cfg)
class DataSourceOVFNet(DataSourceOVF):
seeddir = base_seeddir + '/ovf-net'
supported_seed_starts = ("http://", "https://", "ftp://")
# this will return a dict with some content
# meta-data, user-data
def read_ovf_environment(contents):
props = getProperties(contents)
md = {}
cfg = {}
ud = ""
cfg_props = ['password', ]
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
for prop, val in props.iteritems():
if prop == 'hostname':
prop = "local-hostname"
if prop in md_props:
md[prop] = val
elif prop in cfg_props:
cfg[prop] = val
elif prop == "user-data":
try:
ud = base64.decodestring(val)
except:
ud = val
return(md, ud, cfg)
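# Illustrative sketch (added comment): given OVF properties such as
#   {'hostname': 'node1', 'password': 'secret', 'user-data': '<base64 blob>'}
# read_ovf_environment() would return roughly
#   md  = {'local-hostname': 'node1'}
#   ud  = the base64-decoded user-data (or the raw value if decoding fails)
#   cfg = {'password': 'secret'}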
# returns a tuple of the filename (within 'dirname') and the contents of the file
# on "not found", returns 'None' for filename and False for contents
def get_ovf_env(dirname):
env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
for fname in env_names:
if os.path.isfile("%s/%s" % (dirname, fname)):
fp = open("%s/%s" % (dirname, fname))
contents = fp.read()
fp.close()
return(fname, contents)
return(None, False)
# transport functions take no input and return
# a 3 tuple of content, path, filename
def transport_iso9660(require_iso=True):
# default_regex matches values in
# /lib/udev/rules.d/60-cdrom_id.rules
# KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
envname = "CLOUD_INIT_CDROM_DEV_REGEX"
default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
devname_regex = os.environ.get(envname, default_regex)
cdmatch = re.compile(devname_regex)
# go through mounts to see if it was already mounted
fp = open("/proc/mounts")
mounts = fp.readlines()
fp.close()
mounted = {}
for mpline in mounts:
(dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
mounted[dev] = (dev, fstype, mp, False)
mp = mp.replace("\\040", " ")
if fstype != "iso9660" and require_iso:
continue
if cdmatch.match(dev[5:]) == None: # take off '/dev/'
continue
(fname, contents) = get_ovf_env(mp)
if contents is not False:
return(contents, dev, fname)
tmpd = None
dvnull = None
devs = os.listdir("/dev/")
devs.sort()
for dev in devs:
fullp = "/dev/%s" % dev
if fullp in mounted or not cdmatch.match(dev) or os.path.isdir(fullp):
continue
fp = None
try:
fp = open(fullp, "rb")
fp.read(512)
fp.close()
except:
if fp:
fp.close()
continue
if tmpd is None:
tmpd = tempfile.mkdtemp()
if dvnull is None:
try:
dvnull = open("/dev/null")
except:
pass
cmd = ["mount", "-o", "ro", fullp, tmpd]
if require_iso:
cmd.extend(('-t', 'iso9660'))
rc = subprocess.call(cmd, stderr=dvnull, stdout=dvnull, stdin=dvnull)
if rc:
continue
(fname, contents) = get_ovf_env(tmpd)
subprocess.call(["umount", tmpd])
if contents is not False:
os.rmdir(tmpd)
return(contents, fullp, fname)
if tmpd:
os.rmdir(tmpd)
if dvnull:
dvnull.close()
return(False, None, None)
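# Note (added, illustrative): the set of probed device names can be narrowed by
# setting the CLOUD_INIT_CDROM_DEV_REGEX environment variable read above, e.g.
#   CLOUD_INIT_CDROM_DEV_REGEX='^sr[0-9]+' to only consider /dev/sr* devices.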
def transport_vmware_guestd():
# http://blogs.vmware.com/vapp/2009/07/ \
# selfconfiguration-and-the-ovf-environment.html
# try:
# cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
# (out, err) = subp(cmd)
# return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
# except:
# # would need to error check here and see why this failed
# # to know if log/error should be raised
# return(False, None, None)
return(False, None, None)
def findChild(node, filter_func):
ret = []
if not node.hasChildNodes():
return ret
for child in node.childNodes:
if filter_func(child):
ret.append(child)
return(ret)
def getProperties(environString):
dom = minidom.parseString(environString)
if dom.documentElement.localName != "Environment":
raise Exception("No Environment Node")
if not dom.documentElement.hasChildNodes():
raise Exception("No Child Nodes")
envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
propSections = findChild(dom.documentElement,
lambda n: n.localName == "PropertySection")
if len(propSections) == 0:
raise Exception("No 'PropertySection's")
props = {}
propElems = findChild(propSections[0], lambda n: n.localName == "Property")
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
val = elem.attributes.getNamedItemNS(envNsURI, "value").value
props[key] = val
return(props)
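# Minimal illustrative example (hypothetical values) of the ovf-env XML that
# getProperties() parses; each <Property oe:key="..." oe:value="..."/> becomes one
# entry in the returned dict:
#
#   <Environment xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">
#     <PropertySection>
#       <Property oe:key="hostname" oe:value="node1"/>
#     </PropertySection>
#   </Environment>
#
# -> {'hostname': 'node1'}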
datasources = (
(DataSourceOVF, (DataSource.DEP_FILESYSTEM, )),
(DataSourceOVFNet,
(DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
)
# return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return(DataSource.list_from_depends(depends, datasources))
if __name__ == "__main__":
def main():
import sys
envStr = open(sys.argv[1]).read()
props = getProperties(envStr)
import pprint
pprint.pprint(props)
md, ud, cfg = read_ovf_environment(envStr)
print "=== md ==="
pprint.pprint(md)
print "=== ud ==="
pprint.pprint(ud)
print "=== cfg ==="
pprint.pprint(cfg)
main()
| gpl-3.0 | 7,722,041,840,196,651,000 | 27.96988 | 79 | 0.577459 | false |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/stats/tests/test_ols.py | 9 | 31424 | """
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
from pandas import compat
from distutils.version import LooseVersion
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assertRaisesRegexp)
import pandas.util.testing as tm
import pandas.compat as compat
from .common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
tm.assertIsInstance(model1, type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
_multiprocess_can_split_ = True
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
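    # Sketch of the API exercised below (added comment, illustrative only):
    #   model = ols(y=Series(...), x=DataFrame(...))                # full-sample OLS
    #   moving = ols(y=y, x=x, window_type='rolling', window=20)    # moving OLS
    # Results such as model._beta_raw are cross-checked against statsmodels.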
@classmethod
def setUpClass(cls):
super(TestOLS, cls).setUpClass()
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
def testOLSWithDatasets_ccard(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
@slow
def testOLSWithDatasets_copper(self):
self.checkDataSet(sm.datasets.copper.load())
@slow
def testOLSWithDatasets_scotland(self):
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all
# 0s
def testWLS(self):
# WLS centered SS changed (fixed) in 0.5.0
sm_version = sm.version.version
if sm_version < LooseVersion('0.5.0'):
raise nose.SkipTest("WLS centered SS not fixed in statsmodels"
" version {0}".format(sm_version))
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1 / weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1 / aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start: end]
endog = dataset.endog[start: end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in compat.iteritems(x):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(tm.TestCase):
_multiprocess_can_split_ = True
'''
For test coverage with faux data
'''
@classmethod
def setUpClass(cls):
super(TestOLSMisc, cls).setUpClass()
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assertNotEqual(model1.r2, model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assertTrue((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns=model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760", "1999": "5904", "2000": "4504",
"2001": "9808", "2002": "4241", "2003": "4086",
"2004": "4687", "2005": "7686", "2006": "3740",
"2007": "3075", "2008": "3753", "2009": "4679",
"2010": "5468", "2011": "7154", "2012": "4292",
"2013": "4283", "2014": "4595", "2015": "9194",
"2016": "4221", "2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assertTrue(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assertTrue(notnull(model.beta.values).all())
tm.assertIsInstance(model, PanelOLS)
model.summary
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
model = ols(y=y, x=x)
expected = ols(y=y, x={'x': x})
assert_series_equal(model.beta, expected.beta)
# GH 5233/5250
assert_series_equal(model.y_predict, model.predict(x=x))
def test_various_attributes(self):
# just make sure everything "works". test correctness elsewhere
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
for attr in series_attrs:
value = getattr(model, attr)
tm.assertIsInstance(value, Series)
# works
model._results
def test_catch_regressor_overlap(self):
df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]
df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]
y = tm.makeTimeSeries()
data = {'foo': df1, 'bar': df2}
self.assertRaises(Exception, ols, y=y, x=data)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(y=y, x=x, intercept=False)
model.summary
model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
rmodel = ols(y=y, x=x, window=10)
model = ols(y=y, x=x)
model.resid
rmodel.resid
def test_plm_lagged_y_predict(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
hyp = ['1*a=0',
'1*b=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
def test_plm_exclude_dummy_corner(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
model = ols(
y=y, x=x, entity_effects=True, dropped_dummies={'entity': 'D'})
model.summary
self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,
dropped_dummies={'entity': 'E'})
def test_columns_tuples_summary(self):
# #1837
X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')])
Y = Series(np.random.randn(10))
# it works!
model = ols(y=Y, x=X)
model.summary
class TestPanelOLS(BaseTest):
_multiprocess_can_split_ = True
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',
'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',
't_stat', 'var_beta']
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
        self.assertTrue(exp_index.equals(index))
index = x.index.get_level_values(1)
index = Index(sorted(set(index)))
exp_index = Index(['A', 'B'])
self.assertTrue(exp_index.equals(index))
x = result._x_filtered
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1),
datetime(2000, 1, 3),
datetime(2000, 1, 4)])
self.assertTrue(exp_index.equals(index))
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1]]
assert_almost_equal(exp_x, result._x.values)
exp_x_filtered = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1],
[11, 20, 1],
[12, 21, 1]]
assert_almost_equal(exp_x_filtered, result._x_filtered.values)
self.assertTrue(result._x_filtered.index.levels[0].equals(
result.y_fitted.index))
def test_wls_panel(self):
y = tm.makeTimeDataFrame()
x = Panel({'x1': tm.makeTimeDataFrame(),
'x2': tm.makeTimeDataFrame()})
y.ix[[1, 7], 'A'] = np.nan
y.ix[[6, 15], 'B'] = np.nan
y.ix[[3, 20], 'C'] = np.nan
y.ix[[5, 11], 'D'] = np.nan
stack_y = y.stack()
stack_x = DataFrame(dict((k, v.stack())
for k, v in compat.iteritems(x)))
weights = x.std('items')
stack_weights = weights.stack()
stack_y.index = stack_y.index._tuple_index
stack_x.index = stack_x.index._tuple_index
stack_weights.index = stack_weights.index._tuple_index
result = ols(y=y, x=x, weights=1 / weights)
expected = ols(y=stack_y, x=stack_x, weights=1 / stack_weights)
assert_almost_equal(result.beta, expected.beta)
for attr in ['resid', 'y_fitted']:
rvals = getattr(result, attr).stack().values
evals = getattr(expected, attr).values
assert_almost_equal(rvals, evals)
def testWithTimeEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])
exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]]
assert_almost_equal(result._x_trans.values, exp_x)
# _check_non_raw_results(result)
def testWithEntityEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[0., 6., 14., 1.], [0, 9, 17, 1], [1, 30, 48, 1]],
index=result._x.index, columns=['FE_B', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithEntityEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
dropped_dummies={'entity': 'B'})
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 6., 14., 1.], [1, 9, 17, 1], [0, 30, 48, 1]],
index=result._x.index, columns=['FE_A', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithXEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
assert_almost_equal(result._y.values.flat, [1, 4, 5])
res = result._x
exp_x = DataFrame([[0., 0., 14., 1.], [0, 1, 17, 1], [1, 0, 48, 1]],
columns=['x1_30', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
dropped_dummies={'x1': 30})
res = result._x
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 0., 14., 1.], [0, 1, 17, 1], [0, 0, 48, 1]],
columns=['x1_6', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndConversion(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],
[0, 0, 0, 1, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_baz', 'x2_foo', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testWithXEffectsAndConversionAndDroppedDummies(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
dropped_dummies={'x2': 'foo'})
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],
[0, 0, 0, 0, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_bar', 'x2_baz', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testForSeries(self):
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=0)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=1,
nw_overlap=True)
def testRolling(self):
self.checkMovingOLS(self.panel_x, self.panel_y)
def testRollingWithFixedEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
entity_effects=True)
self.checkMovingOLS(self.panel_x, self.panel_y, intercept=False,
entity_effects=True)
def testRollingWithTimeEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True)
def testRollingWithNeweyWest(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1)
def testRollingWithEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='entity')
def testUnknownClusterRaisesValueError(self):
assertRaisesRegexp(ValueError, "Unrecognized cluster.*ridiculous",
self.checkMovingOLS, self.panel_x, self.panel_y,
cluster='ridiculous')
def testRollingWithTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True, cluster='entity')
def testRollingWithTimeCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='time')
def testRollingWithNeweyWestAndEntityCluster(self):
self.assertRaises(ValueError, self.checkMovingOLS,
self.panel_x, self.panel_y,
nw_lags=1, cluster='entity')
def testRollingWithNeweyWestAndTimeEffectsAndEntityCluster(self):
self.assertRaises(ValueError,
self.checkMovingOLS, self.panel_x, self.panel_y,
nw_lags=1, cluster='entity',
time_effects=True)
def testExpanding(self):
self.checkMovingOLS(
self.panel_x, self.panel_y, window_type='expanding')
def testNonPooled(self):
self.checkNonPooled(y=self.panel_y, x=self.panel_x)
self.checkNonPooled(y=self.panel_y, x=self.panel_x,
window_type='rolling', window=25, min_periods=10)
def testUnknownWindowType(self):
assertRaisesRegexp(ValueError, "window.*ridiculous",
self.checkNonPooled, y=self.panel_y, x=self.panel_x,
window_type='ridiculous', window=25, min_periods=10)
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
result = ols(y=y, x=x, pool=False, **kwds)
_check_repr(result)
for attr in NonPooledPanelOLS.ATTRIBUTES:
_check_repr(getattr(result, attr))
def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
window = 25 # must be larger than rank of x
moving = ols(y=y, x=x, window_type=window_type,
window=window, **kwds)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in compat.iteritems(x):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
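        # In short: for every valid window index, the moving estimates must
        # match a fresh static ols() fit on exactly that slice of data;
        # compare() below checks residuals, fitted values and each attribute
        # listed in FIELDS.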
def checkForSeries(self, x, y, series_x, series_y, **kwds):
# Consistency check with simple OLS.
result = ols(y=y, x=x, **kwds)
reference = ols(y=series_y, x=series_x, **kwds)
self.compare(reference, result)
def compare(self, static, moving, event_index=None,
result_index=None):
# Check resid if we have a time index specified
if event_index is not None:
staticSlice = _period_slice(static, -1)
movingSlice = _period_slice(moving, event_index)
ref = static._resid_raw[staticSlice]
res = moving._resid_raw[movingSlice]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[staticSlice]
res = moving._y_fitted_raw[movingSlice]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_auto_rolling_window_type(self):
data = tm.makeTimeDataFrame()
y = data.pop('A')
window_model = ols(y=y, x=data, window=20, min_periods=10)
rolling_model = ols(y=y, x=data, window=20, min_periods=10,
window_type='rolling')
assert_frame_equal(window_model.beta, rolling_model.beta)
def test_group_agg(self):
from pandas.stats.plm import _group_agg
values = np.ones((10, 2)) * np.arange(10).reshape((10, 1))
bounds = np.arange(5) * 2
f = lambda x: x.mean(axis=0)
agged = _group_agg(values, bounds, f)
assert(agged[1][0] == 2.5)
assert(agged[2][0] == 4.5)
# test a function that doesn't aggregate
f2 = lambda x: np.zeros((2, 2))
self.assertRaises(Exception, _group_agg, values, bounds, f2)
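        # Rough sketch of what _group_agg does here: `bounds` = [0, 2, 4, 6, 8]
        # splits the 10 rows into blocks [0:2], [2:4], [4:6], [6:8], [8:10] and
        # applies f (a column mean) to each block, so block 1 averages rows
        # 2 and 3 -> 2.5 and block 2 averages rows 4 and 5 -> 4.5.  The second
        # call fails because f2 returns a 2x2 array instead of one aggregated
        # row per block.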
def _check_non_raw_results(model):
_check_repr(model)
_check_repr(model.resid)
_check_repr(model.summary_as_matrix)
_check_repr(model.y_fitted)
_check_repr(model.y_predict)
def _period_slice(panelModel, i):
index = panelModel._x_trans.index
period = index.levels[0][i]
L, R = index.get_major_bounds(period, period)
return slice(L, R)
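# _period_slice() maps the i-th time period of the stacked (period, entity)
# index to the positional slice of raw observations for that period, which is
# what compare() uses to line up static and moving residuals/fitted values.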
class TestOLSFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([3, 1, 4], index=date_index)
self.TS1 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([1, 5, 9, 2, 6], index=date_index)
self.TS2 = ts
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([5, np.nan, 3], index=date_index)
self.TS3 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([np.nan, 5, 8, 9, 7], index=date_index)
self.TS4 = ts
data = {'x1': self.TS2, 'x2': self.TS4}
self.DF1 = DataFrame(data=data)
data = {'x1': self.TS2, 'x2': self.TS4}
self.DICT1 = data
def testFilterWithSeriesRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, {'x1': self.TS2}, None)
self.tsAssertEqual(self.TS1, lhs)
self.tsAssertEqual(self.TS2[:3], rhs['x1'])
self.tsAssertEqual(self.TS2, rhs_pre['x1'])
def testFilterWithSeriesRHS2(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS2, {'x1': self.TS1}, None)
self.tsAssertEqual(self.TS2[:3], lhs)
self.tsAssertEqual(self.TS1, rhs['x1'])
self.tsAssertEqual(self.TS1, rhs_pre['x1'])
def testFilterWithSeriesRHS3(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS3, {'x1': self.TS4}, None)
exp_lhs = self.TS3[2:3]
exp_rhs = self.TS4[2:3]
exp_rhs_pre = self.TS4[1:]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs, rhs['x1'])
self.tsAssertEqual(exp_rhs_pre, rhs_pre['x1'])
def testFilterWithDataFrameRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, self.DF1, None)
exp_lhs = self.TS1[1:]
exp_rhs1 = self.TS2[1:3]
exp_rhs2 = self.TS4[1:3]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs1, rhs['x1'])
self.tsAssertEqual(exp_rhs2, rhs['x2'])
def testFilterWithDictRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, self.DICT1, None)
exp_lhs = self.TS1[1:]
exp_rhs1 = self.TS2[1:3]
exp_rhs2 = self.TS4[1:3]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs1, rhs['x1'])
self.tsAssertEqual(exp_rhs2, rhs['x2'])
def tsAssertEqual(self, ts1, ts2):
self.assert_numpy_array_equal(ts1, ts2)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 | -6,707,586,531,301,820,000 | 33.646086 | 79 | 0.559286 | false |
mrquim/repository.mrquim | repo/script.module.youtube.dl/lib/youtube_dl/extractor/playtvak.py | 36 | 6794 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
qualities,
)
class PlaytvakIE(InfoExtractor):
IE_DESC = 'Playtvak.cz, iDNES.cz and Lidovky.cz'
_VALID_URL = r'https?://(?:.+?\.)?(?:playtvak|idnes|lidovky|metro)\.cz/.*\?(?:c|idvideo)=(?P<id>[^&]+)'
_TESTS = [{
'url': 'http://www.playtvak.cz/vyzente-vosy-a-srsne-ze-zahrady-dn5-/hodinovy-manzel.aspx?c=A150730_150323_hodinovy-manzel_kuko',
'md5': '4525ae312c324b4be2f4603cc78ceb4a',
'info_dict': {
'id': 'A150730_150323_hodinovy-manzel_kuko',
'ext': 'mp4',
'title': 'Vyžeňte vosy a sršně ze zahrady',
'description': 'md5:f93d398691044d303bc4a3de62f3e976',
'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
'duration': 279,
'timestamp': 1438732860,
'upload_date': '20150805',
'is_live': False,
}
}, { # live video test
'url': 'http://slowtv.playtvak.cz/planespotting-0pr-/planespotting.aspx?c=A150624_164934_planespotting_cat',
'info_dict': {
'id': 'A150624_164934_planespotting_cat',
'ext': 'flv',
'title': 're:^Přímý přenos iDNES.cz [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Sledujte provoz na ranveji Letiště Václava Havla v Praze',
'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
'is_live': True,
},
'params': {
'skip_download': True, # requires rtmpdump
},
}, { # idnes.cz
'url': 'http://zpravy.idnes.cz/pes-zavreny-v-aute-rozbijeni-okynek-v-aute-fj5-/domaci.aspx?c=A150809_104116_domaci_pku',
'md5': '819832ba33cd7016e58a6658577fe289',
'info_dict': {
'id': 'A150809_104116_domaci_pku',
'ext': 'mp4',
'title': 'Zavřeli jsme mraženou pizzu do auta. Upekla se',
'description': 'md5:01e73f02329e2e5760bd5eed4d42e3c2',
'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
'duration': 39,
'timestamp': 1438969140,
'upload_date': '20150807',
'is_live': False,
}
}, { # lidovky.cz
'url': 'http://www.lidovky.cz/dalsi-demonstrace-v-praze-o-migraci-duq-/video.aspx?c=A150808_214044_ln-video_ELE',
'md5': 'c7209ac4ba9d234d4ad5bab7485bcee8',
'info_dict': {
'id': 'A150808_214044_ln-video_ELE',
'ext': 'mp4',
'title': 'Táhni! Demonstrace proti imigrantům budila emoce',
'description': 'md5:97c81d589a9491fbfa323c9fa3cca72c',
'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
'timestamp': 1439052180,
'upload_date': '20150808',
'is_live': False,
}
}, { # metro.cz
'url': 'http://www.metro.cz/video-pod-billboardem-se-na-vltavske-roztocil-kolotoc-deti-vozil-jen-par-hodin-1hx-/metro-extra.aspx?c=A141111_173251_metro-extra_row',
'md5': '84fc1deedcac37b7d4a6ccae7c716668',
'info_dict': {
'id': 'A141111_173251_metro-extra_row',
'ext': 'mp4',
'title': 'Recesisté udělali z billboardu kolotoč',
'description': 'md5:7369926049588c3989a66c9c1a043c4c',
'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
'timestamp': 1415725500,
'upload_date': '20141111',
'is_live': False,
}
}, {
'url': 'http://www.playtvak.cz/embed.aspx?idvideo=V150729_141549_play-porad_kuko',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
info_url = self._html_search_regex(
r'Misc\.videoFLV\(\s*{\s*data\s*:\s*"([^"]+)"', webpage, 'info url')
parsed_url = compat_urlparse.urlparse(info_url)
qs = compat_urlparse.parse_qs(parsed_url.query)
qs.update({
'reklama': ['0'],
'type': ['js'],
})
info_url = compat_urlparse.urlunparse(
parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
json_info = self._download_json(
info_url, video_id,
transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])
item = None
for i in json_info['items']:
if i.get('type') == 'video' or i.get('type') == 'stream':
item = i
break
if not item:
raise ExtractorError('No suitable stream found')
quality = qualities(('low', 'middle', 'high'))
formats = []
for fmt in item['video']:
video_url = fmt.get('file')
if not video_url:
continue
format_ = fmt['format']
format_id = '%s_%s' % (format_, fmt['quality'])
preference = None
if format_ in ('mp4', 'webm'):
ext = format_
elif format_ == 'rtmp':
ext = 'flv'
elif format_ == 'apple':
ext = 'mp4'
# Some streams have mp3 audio which does not play
# well with ffmpeg filter aac_adtstoasc
preference = -1
elif format_ == 'adobe': # f4m manifest fails with 404 in 80% of requests
continue
else: # Other formats not supported yet
continue
formats.append({
'url': video_url,
'ext': ext,
'format_id': format_id,
'quality': quality(fmt.get('quality')),
'preference': preference,
})
self._sort_formats(formats)
title = item['title']
is_live = item['type'] == 'stream'
if is_live:
title = self._live_title(title)
description = self._og_search_description(webpage, default=None) or self._html_search_meta(
'description', webpage, 'description')
timestamp = None
duration = None
if not is_live:
duration = int_or_none(item.get('length'))
timestamp = item.get('published')
if timestamp:
timestamp = parse_iso8601(timestamp[:-5])
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': item.get('image'),
'duration': duration,
'timestamp': timestamp,
'is_live': is_live,
'formats': formats,
}
| gpl-2.0 | -4,390,604,338,287,343,000 | 36.436464 | 171 | 0.521104 | false |
elyezer/robottelo | robottelo/ui/contenthost.py | 1 | 10594 | """Utilities to manipulate content hosts via UI."""
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class ContentHost(Base):
"""Manipulates Content Hosts from UI"""
is_katello = True
def navigate_to_entity(self):
"""Navigate to Content Hosts entity page"""
Navigator(self.browser).go_to_content_hosts()
def _search_locator(self):
"""Specify locator for Content Hosts entity search procedure"""
return locators['contenthost.select_name']
def add_subscriptions(self, subscriptions=None, tab_locator=None,
select_locator=None):
"""Add or remove subscription association for content host."""
strategy, value = locators['contenthost.subscription_select']
self.click(tab_locators['contenthost.tab_subscriptions'])
self.click(tab_locators['contenthost.tab_subscriptions_subscriptions'])
if not self.wait_until_element(tab_locator):
            raise UIError('Can not manage subscriptions for content host. '
                          'Make sure content host is registered')
self.click(tab_locators['contenthost.add_subscription'])
for subscription in subscriptions:
self.click(strategy, value % subscription)
self.click(select_locator)
def update(self, name, new_name=None, add_subscriptions=None,
rm_subscriptions=None):
"""Updates an existing content host"""
self.search_and_click(name)
self.click(tab_locators['contenthost.tab_details'])
if new_name:
self.edit_entity(
locators['contenthost.edit_name'],
locators['contenthost.edit_name_text'],
new_name,
locators['contenthost.save_name'],
)
if add_subscriptions:
self.add_subscriptions(
subscriptions=add_subscriptions,
tab_locator=tab_locators['contenthost.add_subscription'],
select_locator=locators['contenthost.add_selected'],
)
if rm_subscriptions:
self.add_subscriptions(
                subscriptions=rm_subscriptions,
tab_locator=tab_locators['contenthost.list_subscriptions'],
select_locator=locators['contenthost.remove_selected'],
)
def unregister(self, name, really=True):
"""Unregisters a content host."""
self.search_and_click(name)
self.click(locators['contenthost.unregister'])
if really:
self.click(common_locators['confirm_remove'])
else:
self.click(common_locators['cancel'])
def delete(self, name, really=True):
"""Unregisters and completely deletes content host. Custom helper is
needed as deletion works through unregistering menu, by selecting
appropriate radio button."""
self.logger.debug(u'Deleting entity %s', name)
self.search_and_click(name)
self.click(locators['contenthost.unregister'])
self.click(locators['contenthost.confirm_deletion'])
if really:
self.click(common_locators['confirm_remove'])
else:
self.click(common_locators['cancel'])
# Make sure that element is really removed from UI
self.button_timeout = 3
self.result_timeout = 1
try:
for _ in range(3):
searched = self.search(name)
if bool(searched) != really:
break
self.browser.refresh()
if bool(searched) == really:
raise UIError(
u'Delete functionality works improperly for "{0}" entity'
.format(name))
finally:
self.button_timeout = 15
self.result_timeout = 15
def validate_subscription_status(self, name, expected_value=True,
timeout=120):
"""Check whether a content host has active subscription or not"""
for _ in range(timeout / 5):
self.search(name)
strategy, value = (
locators['contenthost.subscription_active'] if expected_value
else locators['contenthost.subscription_not_active']
)
result = self.wait_until_element(
(strategy, value % name), timeout=5)
if result:
return True
return False
def execute_package_action(self, name, action_name, action_value,
timeout=120):
"""Execute remote package action on a content host
:param name: content host name to remotely execute package action on
:param action_name: remote action to execute. Can be one of 5: 'Package
Install', 'Package Update', 'Package Remove', 'Group Install' or
'Group Remove'
        :param action_value: Package or package group name to remotely
install/upgrade/remove (depending on `action_name`)
:param timeout: Timeout in seconds for remote action task to finish
:raise: UIError if remote task finished by timeout
:return: Returns a string containing task status
"""
self.click(self.search(name))
self.click(tab_locators['contenthost.tab_packages'])
self.assign_value(
locators['contenthost.remote_actions'], action_name)
self.assign_value(
locators['contenthost.package_name_input'], action_value)
self.click(locators['contenthost.perform_remote_action'])
result = self.wait_until_element(
locators['contenthost.remote_action_finished'],
timeout=timeout,
)
if result is None:
raise UIError('Timeout waiting for package action to finish')
return result.get_attribute('type')
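        # Illustrative call (host and package names here are made up, and the
        # exact status strings come from whatever the Katello UI reports):
        #
        #   status = ContentHost(browser).execute_package_action(
        #       'client01.example.com', 'Package Install', 'zsh')
        #   assert status == 'success'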
def install_errata(self, name, errata_id, timeout=120):
"""Install errata on a content host
:param name: content host name to apply errata on
:param errata_id: errata id, e.g. 'RHEA-2012:0055'
:param timeout: Timeout in seconds for errata installation task to
finish
:raise: UIError if remote task finished by timeout
:return: Returns a string containing task status
"""
self.click(self.search(name))
self.click(tab_locators['contenthost.tab_errata'])
self.click(locators['contenthost.errata_select'] % errata_id)
self.click(locators['contenthost.errata_apply'])
self.click(locators['contenthost.confirm_errata'])
result = self.wait_until_element(
locators['contenthost.remote_action_finished'],
timeout=timeout,
)
if result is None:
raise UIError('Timeout waiting for errata installation to finish')
return result.get_attribute('type')
def package_search(self, name, package_name):
"""Search for installed package on specific content host"""
self.click(self.search(name))
self.click(tab_locators['contenthost.tab_packages'])
self.assign_value(
locators['contenthost.package_search_box'], package_name)
self.click(locators['contenthost.package_search_button'])
return self.wait_until_element(
locators['contenthost.package_search_name'] % package_name)
def errata_search(self, name, errata_id, environment_name=None):
"""Search for errata applicable for specific content host"""
self.search_and_click(name)
self.click(tab_locators['contenthost.tab_errata'])
if environment_name is not None:
self.click(
locators['contenthost.errata_environment_select'] %
environment_name
)
self.assign_value(
common_locators['kt_table_search'],
'id = "{0}"'.format(errata_id),
)
self.click(common_locators['kt_table_search_button'])
return self.wait_until_element(
locators['contenthost.errata_select'] % errata_id)
def fetch_errata_counts(self, name, details_page=False):
"""Fetch errata of all types available for content host and return a
dict containing errata name (type), color and value (errata counts).
Works both from content host list and details pages.
"""
contenthost = self.search(name)
if details_page:
self.click(contenthost)
strategy, value = locators[
'contenthost.details_page_errata_counts']
else:
strategy, value = locators['contenthost.select_errata_counts']
value = value % name
erratas = self.browser.find_elements(strategy, value)
result = {}
for errata in erratas:
value = errata.text
icon = errata.find_element(
*locators['contenthost.errata_counts_icon'])
name = icon.get_attribute('title').lower().replace(' ', '_')
color = icon.get_attribute('class').split()[-1]
result[name] = {}
result[name]['color'] = color
result[name]['value'] = int(value)
return result
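        # The returned mapping has one entry per errata icon shown in the UI,
        # keyed by the icon title; key and color names depend on the page, so
        # the values below are illustrative only, e.g.:
        #   {'security': {'color': 'red', 'value': 2},
        #    'bug_fix': {'color': 'yellow', 'value': 1},
        #    'enhancement': {'color': 'black', 'value': 0}}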
def fetch_parameters(self, name, parameters_list):
"""Fetches parameter values of specified host
:param name: content host's name (with domain)
:param parameters_list: A list of parameters to be fetched. Each
parameter should be a separate list containing tab name and
parameter name in absolute correspondence to UI (Similar to
parameters list passed to create a host). Example::
[
['Details', 'Registered By'],
['Provisioning Details', 'Status'],
]
:return: Dictionary of parameter name - parameter value pairs
:rtype: dict
"""
self.search_and_click(name)
result = {}
for tab_name, param_name in parameters_list:
tab_locator = tab_locators['.tab_'.join((
'contenthost',
(tab_name.lower()).replace(' ', '_')
))]
param_locator = locators['.fetch_'.join((
'contenthost',
(param_name.lower()).replace(' ', '_')
))]
self.click(tab_locator)
result[param_name] = self.wait_until_element(param_locator).text
return result
| gpl-3.0 | -4,051,547,977,331,056,600 | 41.546185 | 79 | 0.599868 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib2to3/fixes/fix_import.py | 1 | 2905 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: fix_import.py
"""Fixer for import statements.
If spam is being imported from the local directory, this import:
from spam import eggs
Becomes:
from .spam import eggs
And this import:
import spam
Becomes:
from . import spam
"""
from .. import fixer_base
from os.path import dirname, join, exists, sep
from ..fixer_util import FromImport, syms, token
def traverse_imports(names):
"""
Walks over all the names imported in a dotted_as_names node.
"""
pending = [
names]
while pending:
node = pending.pop()
if node.type == token.NAME:
yield node.value
elif node.type == syms.dotted_name:
yield ''.join([ ch.value for ch in node.children ])
elif node.type == syms.dotted_as_name:
pending.append(node.children[0])
elif node.type == syms.dotted_as_names:
pending.extend(node.children[::-2])
else:
            raise AssertionError('unknown node type')
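# For a statement like "import spam.ham, eggs as scrambled" (names made up),
# traverse_imports() yields each dotted module name being imported, here
# 'spam.ham' and 'eggs', ignoring any "as" aliases.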
class FixImport(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n import_from< 'from' imp=any 'import' ['('] any [')'] >\n |\n import_name< 'import' imp=any >\n "
def start_tree(self, tree, name):
super(FixImport, self).start_tree(tree, name)
self.skip = 'absolute_import' in tree.future_features
def transform(self, node, results):
if self.skip:
return
imp = results['imp']
if node.type == syms.import_from:
while not hasattr(imp, 'value'):
imp = imp.children[0]
if self.probably_a_local_import(imp.value):
imp.value = '.' + imp.value
imp.changed()
else:
have_local = False
have_absolute = False
for mod_name in traverse_imports(imp):
if self.probably_a_local_import(mod_name):
have_local = True
else:
have_absolute = True
if have_absolute:
if have_local:
self.warning(node, 'absolute and local imports together')
return
new = FromImport('.', [imp])
new.prefix = node.prefix
return new
def probably_a_local_import(self, imp_name):
if imp_name.startswith('.'):
return False
imp_name = imp_name.split('.', 1)[0]
base_path = dirname(self.filename)
base_path = join(base_path, imp_name)
if not exists(join(dirname(base_path), '__init__.py')):
return False
for ext in ['.py', sep, '.pyc', '.so', '.sl', '.pyd']:
if exists(base_path + ext):
return True
return False | unlicense | -4,872,746,734,044,116,000 | 31.651685 | 126 | 0.558692 | false |
vmarteev/amphtml | validator/webui/build.py | 22 | 5761 | #!/usr/bin/env python2.7
#
# Copyright 2016 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
# TODO(powdercloud): Make a gulp file or similar for this. For now
# it's simply split off from the main build.py in the parent
# directory, but this is not an idiomatic use to build a Javascript or
# Polymer project, and unlike for the parent directory there's no
# particular benefit to using Python.
import glob
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
def Die(msg):
"""Prints error and exits with status 1.
Args:
msg: The error message to emit
"""
print >> sys.stderr, msg
sys.exit(1)
def GetNodeJsCmd():
"""Ensure Node.js is installed and return the proper command to run."""
logging.info('entering ...')
for cmd in ['node', 'nodejs']:
try:
output = subprocess.check_output([cmd, '--eval', 'console.log("42")'])
if output.strip() == '42':
logging.info('... done')
return cmd
except (subprocess.CalledProcessError, OSError):
continue
Die('Node.js not found. Try "apt-get install nodejs".')
def CheckPrereqs():
"""Checks that various prerequisites for this script are satisfied."""
logging.info('entering ...')
if platform.system() != 'Linux' and platform.system() != 'Darwin':
Die('Sorry, this script assumes Linux or Mac OS X thus far. '
'Please feel free to edit the source and fix it to your needs.')
# Ensure source files are available.
for f in ['webui.js', 'index.html',
'logo-blue.svg', 'package.json']:
if not os.path.exists(f):
Die('%s not found. Must run in amp_validator source directory.' % f)
# Ensure that npm is installed.
try:
npm_version = subprocess.check_output(['npm', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('npm package manager not found. Try "apt-get install npm".')
# Ensure npm version '1.3.10' or newer.
m = re.search('^(\\d+)\\.(\\d+)\\.(\\d+)$', npm_version)
if (int(m.group(1)), int(m.group(2)), int(m.group(3))) < (1, 3, 10):
Die('Expected npm version 1.3.10 or newer, saw: %s' % npm_version)
logging.info('... done')
def SetupOutDir(out_dir):
"""Sets up a clean output directory.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
if os.path.exists(out_dir):
subprocess.check_call(['rm', '-rf', out_dir])
os.mkdir(out_dir)
logging.info('... done')
def InstallNodeDependencies():
"""Installs the dependencies using npm."""
logging.info('entering ...')
# Install the project dependencies specified in package.json into
# node_modules.
logging.info('installing AMP Validator webui dependencies ...')
subprocess.check_call(
['npm', 'install'],
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('... done')
def CreateWebuiAppengineDist(out_dir):
"""Creates the webui vulcanized directory to deploy to Appengine.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
try:
tempdir = tempfile.mkdtemp()
# Merge the contents of webui with the installed node_modules into a
# common root (a temp directory). This lets us use the vulcanize tool.
for entry in os.listdir('.'):
if entry != 'node_modules':
if os.path.isfile(entry):
shutil.copyfile(entry, os.path.join(tempdir, entry))
else:
shutil.copytree(entry, os.path.join(tempdir, entry))
for entry in os.listdir('node_modules'):
if entry == 'web-animations-js':
shutil.copytree(os.path.join('node_modules', entry),
os.path.join(tempdir, '@polymer', entry))
elif entry != '@polymer':
shutil.copytree(os.path.join('node_modules', entry),
os.path.join(tempdir, entry))
for entry in os.listdir('node_modules/@polymer'):
shutil.copytree(os.path.join('node_modules/@polymer', entry),
os.path.join(tempdir, '@polymer', entry))
vulcanized_index_html = subprocess.check_output([
'node_modules/vulcanize/bin/vulcanize',
'--inline-scripts', '--inline-css',
'-p', tempdir, 'index.html'])
finally:
shutil.rmtree(tempdir)
webui_out = os.path.join(out_dir, 'webui_appengine')
shutil.copytree('.', webui_out, ignore=shutil.ignore_patterns('dist'))
f = open(os.path.join(webui_out, 'index.html'), 'w')
f.write(vulcanized_index_html)
f.close()
logging.info('... success')
def Main():
"""The main method, which executes all build steps and runs the tests."""
logging.basicConfig(
format='[[%(filename)s %(funcName)s]] - %(message)s',
level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
nodejs_cmd = GetNodeJsCmd()
CheckPrereqs()
InstallNodeDependencies()
SetupOutDir(out_dir='dist')
CreateWebuiAppengineDist(out_dir='dist')
if __name__ == '__main__':
Main()
| apache-2.0 | -8,423,039,867,742,467,000 | 32.494186 | 82 | 0.659087 | false |
ToontownUprising/src | otp/namepanel/NameCheck.py | 4 | 10729 | import string
from otp.otpbase import OTPLocalizer
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import NSError
from pandac.PandaModules import TextEncoder, TextNode
notify = DirectNotifyGlobal.directNotify.newCategory('NameCheck')
def filterString(str, filter):
result = ''
for char in str:
if char in filter:
result = result + char
return result
def justLetters(str):
letters = ''
for c in str:
if c.isalpha():
letters = letters + c
return letters
def justUpper(str):
upperCaseLetters = ''
for c in str:
if c.upper() != c.lower():
if c == c.upper():
upperCaseLetters = upperCaseLetters + c
return upperCaseLetters
def wordList(str):
words = str.split()
result = []
for word in words:
subWords = word.split('-')
for sw in subWords:
if sw:
result.append(sw)
return result
def checkName(name, otherCheckFuncs = [], font = None):
def longEnough(name):
if len(name) < 2:
notify.info('name is too short')
return OTPLocalizer.NCTooShort
def emptyName(name):
if name.strip() == '':
notify.info('name is empty')
return OTPLocalizer.NCTooShort
def printableChars(name):
for char in name:
if ord(char) < 128 and char not in string.printable:
notify.info('name contains non-printable char #%s' % ord(char))
return OTPLocalizer.NCGeneric
validAsciiChars = set(".,'-" + string.letters + string.whitespace)
def _validCharacter(c, validAsciiChars = validAsciiChars, font = font):
if c in validAsciiChars:
return True
if c.isalpha() or c.isspace():
return True
return False
def badCharacters(name, _validCharacter = _validCharacter):
for char in name:
if not _validCharacter(char):
if char in string.digits:
notify.info('name contains digits')
return OTPLocalizer.NCNoDigits
else:
notify.info('name contains bad char: %s' % TextEncoder().encodeWtext(char))
return OTPLocalizer.NCBadCharacter % TextEncoder().encodeWtext(char)
def fontHasCharacters(name, font = font):
if font:
tn = TextNode('NameCheck')
tn.setFont(font)
for c in name:
if not tn.hasCharacter(ord(c)):
notify.info('name contains bad char: %s' % TextEncoder().encodeWtext(c))
return OTPLocalizer.NCBadCharacter % TextEncoder().encodeWtext(c)
def hasLetters(name):
words = wordList(name)
for word in words:
letters = justLetters(word)
if len(letters) == 0:
notify.info('word "%s" has no letters' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCNeedLetters
def hasVowels(name):
def perWord(word):
if '.' in word:
return None
for char in word:
if ord(char) >= 128:
return None
letters = filterString(word, string.letters)
if len(letters) > 2:
vowels = filterString(letters, 'aeiouyAEIOUY')
if len(vowels) == 0:
notify.info('word "%s" has no vowels' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCNeedVowels
return None
for word in wordList(name):
problem = perWord(word)
if problem:
return problem
def monoLetter(name):
def perWord(word):
word = word
letters = justLetters(word)
if len(letters) > 2:
letters = TextEncoder().decodeText(TextEncoder.lower(TextEncoder().encodeWtext(letters)))
filtered = filterString(letters, letters[0])
if filtered == letters:
notify.info('word "%s" uses only one letter' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCGeneric
for word in wordList(name):
problem = perWord(word)
if problem:
return problem
def checkDashes(name):
def validDash(index, name=name):
if index == 0 or i == len(name)-1:
return 0
if not name[i-1].isalpha():
return 0
if not name[i+1].isalpha():
return 0
return 1
i=0
while 1:
i = name.find('-', i, len(name))
if i < 0:
return None
if not validDash(i):
notify.info('name makes invalid use of dashes')
return OTPLocalizer.NCDashUsage
i += 1
def checkCommas(name):
def validComma(index, name=name):
if index == 0 or i == len(name)-1:
return OTPLocalizer.NCCommaEdge
if name[i-1].isspace():
return OTPLocalizer.NCCommaAfterWord
if not name[i+1].isspace():
return OTPLocalizer.NCCommaUsage
return None
i=0
while 1:
i = name.find(',', i, len(name))
if i < 0:
return None
problem = validComma(i)
if problem:
notify.info('name makes invalid use of commas')
return problem
i += 1
def checkPeriods(name):
words = wordList(name)
for word in words:
if word[-1] == ',':
word = word[:-1]
numPeriods = word.count('.')
if not numPeriods:
continue
letters = justLetters(word)
numLetters = len(letters)
if word[-1] != '.':
notify.info('word "%s" does not end in a period' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCPeriodUsage
if numPeriods > 2:
notify.info('word "%s" has too many periods' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCPeriodUsage
if numPeriods == 2:
if not (word[1] == '.' and word[3] == '.'):
notify.info('word "%s" does not fit the J.T. pattern' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCPeriodUsage
return None
def checkApostrophes(name):
words = wordList(name)
for word in words:
numApos = word.count("'")
if numApos > 2:
notify.info('word "%s" has too many apostrophes.' % TextEncoder().encodeWtext(word))
return OTPLocalizer.NCApostrophes
numApos = name.count("'")
if numApos > 3:
notify.info('name has too many apostrophes.')
return OTPLocalizer.NCApostrophes
def tooManyWords(name):
if len(wordList(name)) > 4:
notify.info('name has too many words')
return OTPLocalizer.NCTooManyWords
def allCaps(name):
letters = justLetters(name)
if len(letters) > 2:
upperLetters = TextEncoder().decodeText(TextEncoder.upper(TextEncoder().encodeWtext(letters)))
for i in xrange(len(upperLetters)):
                if not upperLetters[i].isupper():
return
if upperLetters == letters:
notify.info('name is all caps')
return OTPLocalizer.NCAllCaps
def mixedCase(name):
words = wordList(name)
for word in words:
if len(word) > 2:
capitals = justUpper(word)
if len(capitals) > 2:
notify.info('name has mixed case')
return OTPLocalizer.NCMixedCase
def checkJapanese(name):
asciiSpace = range(32, 33)
asciiDigits = range(48, 64)
hiragana = range(12353, 12448)
katakana = range(12449, 12544)
halfwidthKatakana = range(65381, 65440)
halfwidthCharacter = set(asciiSpace + halfwidthKatakana)
allowedUtf8 = set(asciiSpace + hiragana + katakana + halfwidthKatakana)
te = TextEncoder()
dc = 0.0
for char in (ord(char) for char in te.decodeText(name)):
if char not in allowedUtf8:
if char in asciiDigits:
notify.info('name contains not allowed ascii digits')
return OTPLocalizer.NCNoDigits
else:
notify.info('name contains not allowed utf8 char: 0x%04x' % char)
return OTPLocalizer.NCBadCharacter % te.encodeWtext(unichr(char))
elif char in halfwidthCharacter:
dc += 0.5
else:
dc += 1
if dc < 2:
notify.info('name is too short: %0.1f' % dc)
return OTPLocalizer.NCTooShort
elif dc > 8:
notify.info('name has been occupied more than eight display cells: %0.1f' % dc)
return OTPLocalizer.NCGeneric
def repeatedChars(name):
count = 1
lastChar = None
i = 0
while i < len(name):
char = name[i]
i += 1
if char == lastChar:
count += 1
else:
count = 1
lastChar = char
if count > 2:
notify.info('character %s is repeated too many times' % TextEncoder().encodeWtext(char))
return OTPLocalizer.NCRepeatedChar % TextEncoder().encodeWtext(char)
return
checks = [printableChars,
badCharacters,
fontHasCharacters,
longEnough,
emptyName,
hasLetters,
hasVowels,
monoLetter,
checkDashes,
checkCommas,
checkPeriods,
checkApostrophes,
tooManyWords,
allCaps,
mixedCase,
repeatedChars] + otherCheckFuncs
symmetricChecks = []
name = TextEncoder().decodeText(name)
notify.info('checking name "%s"...' % TextEncoder().encodeWtext(name))
for check in checks:
problem = check(name[:])
if not problem and check in symmetricChecks:
            bName = name[::-1]
problem = check(bName)
print 'problem = %s' % problem
if problem:
return problem
return None
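# Typical use: checkName() returns None when the name passes every filter and
# a localized rejection message otherwise (illustrative names below):
#
#   checkName('Flippy')       # -> None
#   checkName('Flippy123')    # -> OTPLocalizer.NCNoDigits
#   checkName('XxDoomxX')     # -> a rejection such as OTPLocalizer.NCMixedCase
#
# Callers generally show any non-None result to the player as the reason the
# name was rejected.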
severity = notify.getSeverity()
notify.setSeverity(NSError)
for i in xrange(32):
pass
for c in '!"#$%&()*+/:;<=>?@[\\]^_`{|}~':
pass
notify.setSeverity(severity)
del severity
| mit | 508,971,855,783,180,300 | 31.026866 | 108 | 0.540964 | false |
maestro-hybrid-cloud/horizon | openstack_dashboard/dashboards/identity/domains/tests.py | 10 | 22411 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.identity.domains import constants
from openstack_dashboard.dashboards.identity.domains import workflows
DOMAINS_INDEX_URL = reverse(constants.DOMAINS_INDEX_URL)
DOMAIN_CREATE_URL = reverse(constants.DOMAINS_CREATE_URL)
DOMAIN_UPDATE_URL = reverse(constants.DOMAINS_UPDATE_URL, args=[1])
USER_ROLE_PREFIX = constants.DOMAIN_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = constants.DOMAIN_GROUP_MEMBER_SLUG + "_role_"
class DomainsViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('domain_list',)})
def test_index(self):
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
self.mox.ReplayAll()
res = self.client.get(DOMAINS_INDEX_URL)
self.assertTemplateUsed(res, constants.DOMAINS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, self.domains.list())
self.assertContains(res, 'Create Domain')
self.assertContains(res, 'Edit')
self.assertContains(res, 'Delete Domain')
self.assertContains(res, 'Disable Domain')
self.assertContains(res, 'Enable Domain')
@test.create_stubs({api.keystone: ('domain_list',
'keystone_can_edit_domain')})
def test_index_with_keystone_can_edit_domain_false(self):
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
api.keystone.keystone_can_edit_domain() \
.MultipleTimes().AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(DOMAINS_INDEX_URL)
self.assertTemplateUsed(res, constants.DOMAINS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, self.domains.list())
self.assertNotContains(res, 'Create Domain')
self.assertNotContains(res, 'Edit')
self.assertNotContains(res, 'Delete Domain')
self.assertNotContains(res, 'Disable Domain')
self.assertNotContains(res, 'Enable Domain')
@test.create_stubs({api.keystone: ('domain_list',
'domain_delete')})
def test_delete_domain(self):
domain = self.domains.get(id="2")
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
api.keystone.domain_delete(IgnoreArg(), domain.id)
self.mox.ReplayAll()
formData = {'action': 'domains__delete__%s' % domain.id}
res = self.client.post(DOMAINS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
@test.create_stubs({api.keystone: ('domain_list', )})
def test_delete_with_enabled_domain(self):
domain = self.domains.get(id="1")
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
self.mox.ReplayAll()
formData = {'action': 'domains__delete__%s' % domain.id}
res = self.client.post(DOMAINS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
self.assertMessageCount(error=2)
@test.create_stubs({api.keystone: ('domain_list',
'domain_update')})
def test_disable(self):
domain = self.domains.get(id="1")
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
api.keystone.domain_update(IsA(http.HttpRequest),
description=domain.description,
domain_id=domain.id,
enabled=False,
name=domain.name).AndReturn(None)
self.mox.ReplayAll()
formData = {'action': 'domains__disable__%s' % domain.id}
res = self.client.post(DOMAINS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
self.assertMessageCount(error=0)
@test.create_stubs({api.keystone: ('domain_list',
'domain_update')})
def test_enable(self):
domain = self.domains.get(id="2")
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
api.keystone.domain_update(IsA(http.HttpRequest),
description=domain.description,
domain_id=domain.id,
enabled=True,
name=domain.name).AndReturn(None)
self.mox.ReplayAll()
formData = {'action': 'domains__enable__%s' % domain.id}
res = self.client.post(DOMAINS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
self.assertMessageCount(error=0)
@test.create_stubs({api.keystone: ('domain_get',
'domain_list', )})
def test_set_clear_domain_context(self):
domain = self.domains.get(id="1")
api.keystone.domain_get(IgnoreArg(), domain.id).AndReturn(domain)
api.keystone.domain_get(IgnoreArg(), domain.id).AndReturn(domain)
api.keystone.domain_list(IgnoreArg()).AndReturn(self.domains.list())
self.mox.ReplayAll()
formData = {'action': 'domains__set_domain_context__%s' % domain.id}
res = self.client.post(DOMAINS_INDEX_URL, formData)
self.assertTemplateUsed(res, constants.DOMAINS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, [domain, ])
self.assertContains(res, "<em>test_domain:</em>")
formData = {'action': 'domains__clear_domain_context__%s' % domain.id}
res = self.client.post(DOMAINS_INDEX_URL, formData)
self.assertTemplateUsed(res, constants.DOMAINS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, self.domains.list())
self.assertNotContains(res, "<em>test_domain:</em>")
class CreateDomainWorkflowTests(test.BaseAdminViewTests):
def _get_domain_info(self, domain):
domain_info = {"name": domain.name,
"description": domain.description,
"enabled": domain.enabled}
return domain_info
def _get_workflow_data(self, domain):
domain_info = self._get_domain_info(domain)
return domain_info
def test_add_domain_get(self):
url = reverse('horizon:identity:domains:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateDomain.name)
self.assertQuerysetEqual(workflow.steps,
['<CreateDomainInfo: create_domain>', ])
@test.create_stubs({api.keystone: ('domain_create', )})
def test_add_domain_post(self):
domain = self.domains.get(id="1")
api.keystone.domain_create(IsA(http.HttpRequest),
description=domain.description,
enabled=domain.enabled,
name=domain.name).AndReturn(domain)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(domain)
res = self.client.post(DOMAIN_CREATE_URL, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
class UpdateDomainWorkflowTests(test.BaseAdminViewTests):
def _get_domain_info(self, domain):
domain_info = {"domain_id": domain.id,
"name": domain.name,
"description": domain.description,
"enabled": domain.enabled}
return domain_info
def _get_workflow_data(self, domain):
domain_info = self._get_domain_info(domain)
return domain_info
def _get_all_users(self, domain_id=None):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
def _get_domain_groups(self, domain_id):
# all domain groups have role assignments
return self._get_all_groups(domain_id)
def _get_domain_role_assignment(self, domain_id):
domain_scope = {'domain': {'id': domain_id}}
return self.role_assignments.filter(scope=domain_scope)
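        # The test fixtures store each role assignment with a `scope`
        # attribute; filtering on {'domain': {'id': domain_id}} keeps only the
        # assignments scoped to the domain under test.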
@test.create_stubs({api.keystone: ('domain_get',
'get_default_role',
'role_list',
'user_list',
'role_assignments_list',
'group_list',
'roles_for_group')})
def test_update_domain_get(self):
default_role = self.roles.first()
domain = self.domains.get(id="1")
users = self._get_all_users(domain.id)
groups = self._get_all_groups(domain.id)
roles = self.roles.list()
role_assignments = self._get_domain_role_assignment(domain.id)
api.keystone.domain_get(IsA(http.HttpRequest), '1').AndReturn(domain)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain.id) \
.AndReturn(users)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
domain=domain.id) \
.AndReturn(role_assignments)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain.id) \
.AndReturn(groups)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
domain=domain.id) \
.AndReturn(roles)
self.mox.ReplayAll()
res = self.client.get(DOMAIN_UPDATE_URL)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.UpdateDomain.name)
step = workflow.get_step("update_domain")
self.assertEqual(step.action.initial['name'], domain.name)
self.assertEqual(step.action.initial['description'],
domain.description)
self.assertQuerysetEqual(
workflow.steps,
['<UpdateDomainInfo: update_domain>',
'<UpdateDomainUsers: update_user_members>',
'<UpdateDomainGroups: update_group_members>'])
@test.create_stubs({api.keystone: ('domain_get',
'domain_update',
'get_default_role',
'role_list',
'user_list',
'role_assignments_list',
'roles_for_user',
'add_domain_user_role',
'remove_domain_user_role',
'group_list',
'roles_for_group',
'remove_group_role',
'add_group_role',)})
def test_update_domain_post(self):
default_role = self.roles.first()
domain = self.domains.get(id="1")
test_description = 'updated description'
users = self._get_all_users(domain.id)
groups = self._get_all_groups(domain.id)
domain_groups = self._get_domain_groups(domain.id)
roles = self.roles.list()
role_assignments = self._get_domain_role_assignment(domain.id)
api.keystone.domain_get(IsA(http.HttpRequest), '1').AndReturn(domain)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain.id) \
.AndReturn(users)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
domain=domain.id) \
.AndReturn(role_assignments)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain.id) \
.AndReturn(groups)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
domain=domain.id) \
.AndReturn(roles)
workflow_data = self._get_workflow_data(domain)
# update some fields
workflow_data['description'] = test_description
# User assignment form data
workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
# Group assignment form data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
# handle
api.keystone.domain_update(IsA(http.HttpRequest),
description=test_description,
domain_id=domain.id,
enabled=domain.enabled,
name=domain.name).AndReturn(None)
api.keystone.user_list(IsA(http.HttpRequest),
domain=domain.id).AndReturn(users)
# admin user - try to remove all roles on current domain, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
domain=domain.id) \
.AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
domain=domain.id) \
.AndReturn((roles[0],))
# remove role 1
api.keystone.remove_domain_user_role(IsA(http.HttpRequest),
domain=domain.id,
user='2',
role='1')
# add role 2
api.keystone.add_domain_user_role(IsA(http.HttpRequest),
domain=domain.id,
user='2',
role='2')
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
domain=domain.id) \
.AndReturn((roles[1],))
# remove role 2
api.keystone.remove_domain_user_role(IsA(http.HttpRequest),
domain=domain.id,
user='3',
role='2')
# add role 1
api.keystone.add_domain_user_role(IsA(http.HttpRequest),
domain=domain.id,
user='3',
role='1')
# member user 5 - do nothing
api.keystone.roles_for_user(IsA(http.HttpRequest), '5',
domain=domain.id) \
.AndReturn([])
# Group assignments
api.keystone.group_list(IsA(http.HttpRequest),
domain=domain.id).AndReturn(domain_groups)
# admin group - try to remove all roles on current domain
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
domain=domain.id) \
.AndReturn(roles)
for role in roles:
api.keystone.remove_group_role(IsA(http.HttpRequest),
role=role.id,
group='1',
domain=domain.id)
# member group 1 - has role 1, will remove it
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
domain=domain.id) \
.AndReturn((roles[0],))
# remove role 1
api.keystone.remove_group_role(IsA(http.HttpRequest),
role='1',
group='2',
domain=domain.id)
# add role 2
api.keystone.add_group_role(IsA(http.HttpRequest),
role='2',
group='2',
domain=domain.id)
# member group 3 - has role 2
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
domain=domain.id) \
.AndReturn((roles[1],))
# remove role 2
api.keystone.remove_group_role(IsA(http.HttpRequest),
role='2',
group='3',
domain=domain.id)
# add role 1
api.keystone.add_group_role(IsA(http.HttpRequest),
role='1',
group='3',
domain=domain.id)
self.mox.ReplayAll()
res = self.client.post(DOMAIN_UPDATE_URL, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
@test.create_stubs({api.keystone: ('domain_get',)})
def test_update_domain_get_error(self):
domain = self.domains.get(id="1")
api.keystone.domain_get(IsA(http.HttpRequest), domain.id) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
res = self.client.get(DOMAIN_UPDATE_URL)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
@test.create_stubs({api.keystone: ('domain_get',
'domain_update',
'get_default_role',
'role_list',
'user_list',
'role_assignments_list',
'group_list',
'roles_for_group')})
def test_update_domain_post_error(self):
default_role = self.roles.first()
domain = self.domains.get(id="1")
test_description = 'updated description'
users = self._get_all_users(domain.id)
groups = self._get_all_groups(domain.id)
roles = self.roles.list()
role_assignments = self._get_domain_role_assignment(domain.id)
api.keystone.domain_get(IsA(http.HttpRequest), '1').AndReturn(domain)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain.id) \
.AndReturn(users)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
domain=domain.id) \
.AndReturn(role_assignments)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain.id) \
.AndReturn(groups)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
domain=domain.id) \
.AndReturn(roles)
workflow_data = self._get_workflow_data(domain)
# update some fields
workflow_data['description'] = test_description
# User assignment form data
workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
# Group assignment form data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
# handle
api.keystone.domain_update(IsA(http.HttpRequest),
description=test_description,
domain_id=domain.id,
enabled=domain.enabled,
name=domain.name) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
res = self.client.post(DOMAIN_UPDATE_URL, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
self.assertRedirectsNoFollow(res, DOMAINS_INDEX_URL)
| apache-2.0 | -1,695,528,857,215,956,200 | 40.968165 | 78 | 0.53911 | false |
gregerhardt/dta | dta/DynameqScenario.py | 2 | 12959 | __copyright__ = "Copyright 2011 SFCTA"
__license__ = """
This file is part of DTA.
DTA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DTA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DTA. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import os
from .DtaError import DtaError
from .Logger import DtaLogger
from .Scenario import Scenario
from .Utils import Time
from .VehicleClassGroup import VehicleClassGroup
from .VehicleType import VehicleType
class DynameqScenario(Scenario):
"""
A Dynameq Scenario.
"""
SCENARIO_FILE = '%s_scen.dqt'
ADVANCED_HEADER = """<DYNAMEQ>
<VERSION_1.7>
<SCENARIO_FILE>
* CREATED by DTA Anyway http://code.google.com/p/dta/
"""
@classmethod
def read(cls, dir, prefix):
"""
Read the scenario file from disk and return the corresponding
        scenario object.
        """
        sc = Scenario()
        sc.read(dir, prefix)
        return sc
def __init__(self, startTime = Time(0,0), endTime=Time(23,0)):
"""
Constructor of a Scenario for Dynameq.
:param startTime: the start time of the scenario.
:type startTime: a :py:class:`dta.Time` instance
:param endTime: the end time of the scenario.
:type endTime: a :py:class:`dta.Time` instance
"""
Scenario.__init__(self, startTime, endTime)
# for now just a list: (name, units, turn_expr, link_expr, desc)
self._generalizedCosts = []
def read(self, dir, file_prefix):
"""
Reads the scenario configuration from the Dynameq scenario file.
"""
# scenario file processing
scenariofile = os.path.join(dir, DynameqScenario.SCENARIO_FILE % file_prefix)
if not os.path.exists(scenariofile):
raise DtaError("Scenario file %s does not exist" % scenariofile)
for fields in self._readSectionFromFile(scenariofile, "STUDY_PERIOD", "EVENTS"):
self._readStudyPeriodFromFields(fields)
DtaLogger.info("Read %8d %-16s from %s" % (1, "STUDY_PERIOD", scenariofile))
count = 0
for fields in self._readSectionFromFile(scenariofile, "EVENTS", "VEH_CLASSES"):
#self._addEventFromFields(fields)
count += 1
DtaLogger.info("Read %8d %-16s from %s" % (count, "EVENTS", scenariofile))
count = 0
for fields in self._readSectionFromFile(scenariofile, "VEH_CLASSES", "VEH_TYPES"):
self._readVehicleClassFromFields(fields)
count += 1
DtaLogger.info("Read %8d %-16s from %s" % (count, "VEH_CLASSES", scenariofile))
count = 0
for fields in self._readSectionFromFile(scenariofile, "VEH_TYPES", "VEH_CLASS_GROUPS"):
self.addVehicleType(self._readVehicleTypeFromFields(fields))
count += 1
DtaLogger.info("Read %8d %-16s from %s" % (count, "VEH_TYPES", scenariofile))
count = 0
for fields in self._readSectionFromFile(scenariofile, "VEH_CLASS_GROUPS", "GENERALIZED_COSTS"):
self.addVehicleClassGroup(self._readVehicleClassGroupFromFields(fields))
count += 1
DtaLogger.info("Read %8d %-16s from %s" % (count, "VEH_CLASS_GROUPS", scenariofile))
count = 0
for fields in self._readSectionFromFile(scenariofile, "GENERALIZED_COSTS", "ENDOFFILE"):
self._readGeneralizedCostFromFields(fields)
count += 1
DtaLogger.info("Read %8d %-16s from %s" % (count, "GENERALIZED_COSTS", scenariofile))
def write(self, dir, file_prefix):
scenariofile = os.path.join(dir, DynameqScenario.SCENARIO_FILE % file_prefix)
scenariofile_object = open(scenariofile, "w")
scenariofile_object.write(DynameqScenario.ADVANCED_HEADER)
self._writeStudyPeriodToScenarioFile(scenariofile_object)
self._writeEventsToScenarioFile(scenariofile_object)
self._writeVehicleClassesToScenarioFile(scenariofile_object)
self._writeVehicleTypesToScenarioFile(scenariofile_object)
self._writeVehicleClassGroupsToScenarioFile(scenariofile_object)
self._writeGeneralizedCostsToScenarioFile(scenariofile_object)
scenariofile_object.close()
def _readSectionFromFile(self, filename, sectionName, nextSectionName):
"""
Generator function, yields fields (array of strings) from the given section of the given file.
"""
lines = open(filename, "r")
curLine = ""
try:
# find the section
while curLine != sectionName:
curLine = lines.next().strip()
except StopIteration:
raise DtaError("DynameqNetwork _readSectionFromFile failed to find %s in %s" %
(sectionName,filename))
# go past the section name
curLine = lines.next().strip()
# skip any comments
while curLine[0] == "*":
curLine = lines.next().strip()
# these are the ones we want
while not curLine == nextSectionName:
fields = curLine.split()
yield fields
curLine = lines.next().strip()
lines.close()
raise StopIteration
def _readStudyPeriodFromFields(self, fields):
"""
Reads the study period and sets the :py:attr:`Scenario.startTime` and :py:attr:`Scenario.endTime`
"""
time1 = fields[0].split(":")
time2 = fields[1].split(":")
self.startTime = Time(hour=int(time1[0]), minute=int(time1[1]))
self.endTime = Time(hour=int(time2[0]), minute=int(time2[1]))
def _writeStudyPeriodToScenarioFile(self, scenariofile_object):
"""
Write version of _readStudyPeriodFromScenarioFile(). *scenariofile_object* is the file object,
ready for writing.
"""
scenariofile_object.write("STUDY_PERIOD\n")
scenariofile_object.write("* start end\n")
scenariofile_object.write(" %02d:%02d %02d:%02d\n" % (self.startTime.hour, self.startTime.minute,
self.endTime.hour, self.endTime.minute))
    def _readEventsFromFields(self, fields):
        """
        Reads one event (eventTime, eventDescription) from *fields* and stores
        it in :py:attr:`self.events`.
        TODO: update to use dta.Time rather than datetime.time for consistency.
        """
        timestrs = fields[0].split(":")
        eventTime = datetime.time(hour=int(timestrs[0]), minute=int(timestrs[1]))
        eventDesc = fields[1]
        self.events[eventTime] = eventDesc
def _writeEventsToScenarioFile(self, scenariofile_object):
"""
Write version of _readEventsFromScenarioFile(). *scenariofile_object* is the file object,
ready for writing.
"""
scenariofile_object.write("EVENTS\n")
scenariofile_object.write("* time desc\n")
count = 0
for eventTime in sorted(self.events.keys()):
scenariofile_object.write(" %02d:%02d %56s\n" % (eventTime.hour, eventTime.minute,
self.events[eventTime]))
count += 1
DtaLogger.info("Wrote %8d %-16s to %s" % (count, "EVENTS", scenariofile_object.name))
def _readVehicleClassFromFields(self, fields):
self.addVehicleClass(fields[0])
def _writeVehicleClassesToScenarioFile(self, scenariofile_object):
"""
        Write version of _readVehicleClassFromFields(). *scenariofile_object* is the file object,
ready for writing.
"""
scenariofile_object.write("VEH_CLASSES\n")
scenariofile_object.write("* class_name\n")
count = 0
for vehicleClassName in self.vehicleClassNames:
scenariofile_object.write("%17s\n" % vehicleClassName)
count += 1
DtaLogger.info("Wrote %8d %-16s to %s" % (count, "VEH_CLASSES", scenariofile_object.name))
def _readVehicleTypeFromFields(self, fields):
"""
Returns a VehicleType
"""
vehicleClassName = fields[0]
vehicleTypeName = fields[1]
length = float(fields[2])
responseTime = float(fields[3])
maxSpeed = float(fields[4])
speedRatio = float(fields[5])
return VehicleType(vehicleTypeName,
vehicleClassName,
length,
responseTime,
maxSpeed,
speedRatio)
def _writeVehicleTypesToScenarioFile(self, scenariofile_object):
"""
        Write version of _readVehicleTypeFromFields(). *scenariofile_object* is the file object,
ready for writing.
"""
scenariofile_object.write("VEH_TYPES\n")
scenariofile_object.write("*class_name type_name length res_time max_speed speed_ratio\n")
count = 0
for vehicleType in self.vehicleTypes:
scenariofile_object.write("%13s %13s %8.2f %8.2f %8.2f %8.2f\n" % (vehicleType.className,
vehicleType.name,
vehicleType.length,
vehicleType.responseTime,
vehicleType.maxSpeed,
vehicleType.speedRatio))
count += 1
DtaLogger.info("Wrote %8d %-16s to %s" % (count, "VEH_TYPES", scenariofile_object.name))
def _readVehicleClassGroupFromFields(self, fields):
"""
Returns a VehicleClassGroup
"""
groupName = fields[0]
classDef = fields[1]
colorCode = fields[2]
return VehicleClassGroup(groupName, classDef, colorCode)
def _writeVehicleClassGroupsToScenarioFile(self, scenariofile_object):
"""
        Write version of _readVehicleClassGroupFromFields(). *scenariofile_object* is the file object,
ready for writing.
"""
scenariofile_object.write("VEH_CLASS_GROUPS\n")
scenariofile_object.write("* name class color\n")
count = 0
for groupname in sorted(self.vehicleClassGroups.keys()):
scenariofile_object.write("%11s %7s %10s\n" % (groupname,
self.vehicleClassGroups[groupname].classDefinitionString,
self.vehicleClassGroups[groupname].colorCode))
count += 1
DtaLogger.info("Wrote %8d %-16s to %s" % (count, "VEH_CLASS_GROUPS", scenariofile_object.name))
def addGeneralizedCost(self, name, units, turn_expr, link_expr, desc):
"""
TODO: need more documentation on these terms.
"""
self._generalizedCosts.append((name, units, turn_expr, link_expr, desc))
def _readGeneralizedCostFromFields(self, fields):
self._generalizedCosts.append(fields)
def _writeGeneralizedCostsToScenarioFile(self, scenariofile_object):
"""
        Write version of _readGeneralizedCostFromFields().
*scenariofile_object* should be ready for writing
"""
scenariofile_object.write("GENERALIZED_COSTS\n")
scenariofile_object.write("* name units turn_expr link_expr desc\n")
count = 0
for gc in self._generalizedCosts:
scenariofile_object.write(" ".join(gc) + "\n")
count += 1
DtaLogger.info("Wrote %8d %-16s to %s" % (count, "GENERALIZED_COSTS", scenariofile_object.name))
| gpl-3.0 | -3,406,286,406,759,303,700 | 42.078231 | 133 | 0.566402 | false |
kakunbsc/enigma2.4 | lib/python/Plugins/SystemPlugins/CommonInterfaceAssignment/plugin.py | 1 | 26333 | from Screens.Screen import Screen
from Screens.ChannelSelection import *
from Components.ActionMap import HelpableActionMap, ActionMap, NumberActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.config import ConfigNothing
from Components.ConfigList import ConfigList
from Components.Label import Label
from Components.SelectionList import SelectionList
from Components.MenuList import MenuList
from ServiceReference import ServiceReference
from Plugins.Plugin import PluginDescriptor
from xml.etree.cElementTree import parse as ci_parse
from Tools.XMLTools import elementsWithTag, mergeText, stringToXML
from enigma import eDVBCI_UI, eDVBCIInterfaces
from os import system, unlink, path as os_path
class CIselectMainMenu(Screen):
skin = """
<screen name="CIselectMainMenu" position="center,center" size="500,250" title="CI assignment" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="CiList" position="5,50" size="490,200" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.close,
"ok": self.greenPressed,
"cancel": self.close
}, -1)
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Wizzard] FOUND %d CI Slots " % NUM_CI
self.dlg = None
self.state = { }
self.list = [ ]
if NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state == 0:
appname = _("Slot %d") %(slot+1) + " - " + _("no module found")
elif state == 1:
appname = _("Slot %d") %(slot+1) + " - " + _("init modules")
elif state == 2:
appname = _("Slot %d") %(slot+1) + " - " + eDVBCI_UI.getInstance().getAppName(slot)
# ikseong
else :
appname = _("Slot %d") %(slot+1) + " - " + _("no module found")
#
self.list.append( (appname, ConfigNothing(), 0, slot) )
else:
self.list.append( (_("no CI slots found") , ConfigNothing(), 1, -1) )
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["CiList"] = menuList
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def greenPressed(self):
cur = self["CiList"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 1:
print "[CI_Wizzard] there is no CI Slot in your receiver"
else:
print "[CI_Wizzard] selected CI Slot : %d" % slot
if config.usage.setup_level.index > 1: # advanced
self.session.open(CIconfigMenu, slot)
else:
self.session.open(easyCIconfigMenu, slot)
"""def yellowPressed(self): # unused
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Check] FOUND %d CI Slots " % NUM_CI
if NUM_CI > 0:
for ci in range(NUM_CI):
print eDVBCIInterfaces.getInstance().getDescrambleRules(ci)"""
class CIconfigMenu(Screen):
skin = """
<screen name="CIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="CAidList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget source="CAidList" render="Label" position="5,80" size="550,45" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<ePixmap pixmap="skin_default/div-h.png" position="0,125" zPosition="1" size="560,2" />
<widget source="ServiceList_desc" render="Label" position="5,130" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,160" size="550,250" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,160" size="550,250" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.__init__(self, session)
self.ci_slot=ci_slot
self.filename="/etc/enigma2/ci"+str(self.ci_slot)+".xml"
self["key_red"] = StaticText(_("Delete"))
self["key_green"] = StaticText(_("add Service"))
self["key_yellow"] = StaticText(_("add Provider"))
self["key_blue"] = StaticText(_("select CAId"))
self["CAidList_desc"] = StaticText(_("assigned CAIds:"))
self["CAidList"] = StaticText()
self["ServiceList_desc"] = StaticText(_("assigned Services/Provider:"))
self["ServiceList_info"] = StaticText()
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"blue": self.bluePressed,
"cancel": self.cancel
}, -1)
print "[CI_Wizzard_Config] Configuring CI Slots : %d " % self.ci_slot
i=0
self.caidlist=[]
print eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot)
for caid in eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot):
i+=1
self.caidlist.append((str(hex(int(caid))),str(caid),i))
print "[CI_Wizzard_Config_CI%d] read following CAIds from CI: %s" %(self.ci_slot, self.caidlist)
self.selectedcaid = []
self.servicelist = []
self.caids = ""
serviceList = ConfigList(self.servicelist)
serviceList.list = self.servicelist
serviceList.l.setList(self.servicelist)
self["ServiceList"] = serviceList
self.loadXML()
# if config mode !=advanced autoselect any caid
if config.usage.setup_level.index <= 1: # advanced
self.selectedcaid=self.caidlist
self.finishedCAidSelection(self.selectedcaid)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def redPressed(self):
self.delete()
def greenPressed(self):
self.session.openWithCallback( self.finishedChannelSelection, myChannelSelection, None)
def yellowPressed(self):
self.session.openWithCallback( self.finishedProviderSelection, myProviderSelection, None)
def bluePressed(self):
self.session.openWithCallback(self.finishedCAidSelection, CAidSelect, self.caidlist, self.selectedcaid)
def cancel(self):
self.saveXML()
activate_all(self)
self.close()
def setServiceListInfo(self):
if len(self.servicelist):
self["ServiceList_info"].setText("")
else:
self["ServiceList_info"].setText(_("no Services/Providers selected"))
def delete(self):
cur = self["ServiceList"].getCurrent()
if cur and len(cur) > 2:
self.servicelist.remove(cur)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedChannelSelection(self, *args):
if len(args):
ref=args[0]
service_ref = ServiceReference(ref)
service_name = service_ref.getServiceName()
if find_in_list(self.servicelist, service_name, 0)==False:
split_ref=service_ref.ref.toString().split(":")
if split_ref[0] == "1":#== dvb service und nicht muell von None
self.servicelist.append( (service_name , ConfigNothing(), 0, service_ref.ref.toString()) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedProviderSelection(self, *args):
		if len(args)>1: # if nothing is selected, only 1 arg comes back (==None)
name=args[0]
dvbnamespace=args[1]
if find_in_list(self.servicelist, name, 0)==False:
self.servicelist.append( (name , ConfigNothing(), 1, dvbnamespace) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedCAidSelection(self, *args):
if len(args):
self.selectedcaid=args[0]
self.caids=""
if len(self.selectedcaid):
for item in self.selectedcaid:
if len(self.caids):
self.caids+= ", " + item[0]
else:
self.caids=item[0]
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
self["CAidList"].setText(self.caids)
def saveXML(self):
try:
fp = file(self.filename, 'w')
fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
fp.write("<ci>\n")
fp.write("\t<slot>\n")
fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
for item in self.selectedcaid:
if len(self.selectedcaid):
fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
for item in self.servicelist:
if len(self.servicelist):
if item[2]==1:
fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (item[0], item[3]))
else:
fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (item[0], item[3]))
fp.write("\t</slot>\n")
fp.write("</ci>\n")
fp.close()
except:
print "[CI_Config_CI%d] xml not written" %self.ci_slot
			unlink(self.filename)
def loadXML(self):
if not os_path.exists(self.filename):
return
def getValue(definitions, default):
ret = ""
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
try:
tree = ci_parse(self.filename).getroot()
self.read_services=[]
self.read_providers=[]
self.usingcaid=[]
self.ci_config=[]
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
print "ci " + read_slot
i=0
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
self.selectedcaid.append((str(read_caid),str(read_caid),i))
self.usingcaid.append(long(read_caid,16))
i+=1
for service in slot.findall("service"):
read_service_name = service.get("name").encode("UTF-8")
read_service_ref = service.get("ref").encode("UTF-8")
self.read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
self.read_providers.append((read_provider_name,read_provider_dvbname))
self.ci_config.append((int(read_slot), (self.read_services, self.read_providers, self.usingcaid)))
except:
print "[CI_Config_CI%d] error parsing xml..." %self.ci_slot
for item in self.read_services:
if len(item):
self.finishedChannelSelection(item)
for item in self.read_providers:
if len(item):
self.finishedProviderSelection(item[0],item[1])
print self.ci_config
self.finishedCAidSelection(self.selectedcaid)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
class easyCIconfigMenu(CIconfigMenu):
skin = """
<screen name="easyCIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="ServiceList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,80" size="550,300" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,80" size="550,300" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
ci=ci_slot
CIconfigMenu.__init__(self, session, ci_slot)
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"cancel": self.cancel
})
class CAidSelect(Screen):
skin = """
<screen name="CAidSelect" position="center,center" size="450,440" title="select CAId's" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="list" position="5,50" size="440,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,390" zPosition="1" size="450,2" />
<widget source="introduction" render="Label" position="0,400" size="450,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, list, selected_caids):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
for listindex in range(len(list)):
if find_in_list(selected_caids,list[listindex][0],0):
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, True)
else:
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, False)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["introduction"] = StaticText(_("Press OK to select/deselect a CAId."))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.cancel,
"green": self.greenPressed,
"red": self.cancel
}, -1)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("select CAId's"))
def greenPressed(self):
list = self.list.getSelectionsList()
print list
self.close(list)
def cancel(self):
self.close()
class myProviderSelection(ChannelSelectionBase):
skin = """
<screen name="myProviderSelection" position="center,center" size="560,440" title="Select provider to add...">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self["actions"] = ActionMap(["OkCancelActions", "ChannelSelectBaseActions"],
{
"showFavourites": self.doNothing,
"showAllServices": self.cancel,
"showProviders": self.doNothing,
"showSatellites": self.doNothing,
"cancel": self.cancel,
"ok": self.channelSelected
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText(_("Press OK to select a Provider."))
def doNothing(self):
pass
def __onExecCallback(self):
self.showSatellites()
self.setTitle(_("Select provider to add..."))
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
splited_ref=ref.toString().split(":")
if ref.flags == 7 and splited_ref[6] != "0":
self.dvbnamespace=splited_ref[6]
self.enterPath(ref)
else:
self.close(ref.getName(), self.dvbnamespace)
def showSatellites(self):
if not self.pathChangeDisabled:
refstr = '%s FROM SATELLITES ORDER BY satellitePosition'%(self.service_types)
if not self.preEnterPath(refstr):
ref = eServiceReference(refstr)
justSet=False
prev = None
if self.isBasePathEqual(ref):
if self.isPrevPathEqual(ref):
justSet=True
prev = self.pathUp(justSet)
else:
currentRoot = self.getRoot()
if currentRoot is None or currentRoot != ref:
justSet=True
self.clearPath()
self.enterPath(ref, True)
if justSet:
serviceHandler = eServiceCenter.getInstance()
servicelist = serviceHandler.list(ref)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
unsigned_orbpos = service.getUnsignedData(4) >> 16
orbpos = service.getData(4) >> 16
if orbpos < 0:
orbpos += 3600
if service.getPath().find("FROM PROVIDER") != -1:
service_type = _("Providers")
try:
# why we need this cast?
service_name = str(nimmanager.getSatDescription(orbpos))
except:
if unsigned_orbpos == 0xFFFF: #Cable
service_name = _("Cable")
elif unsigned_orbpos == 0xEEEE: #Terrestrial
service_name = _("Terrestrial")
else:
if orbpos > 1800: # west
orbpos = 3600 - orbpos
h = _("W")
else:
h = _("E")
service_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
service.setName("%s - %s" % (service_name, service_type))
self.servicelist.addService(service)
self.servicelist.finishFill()
if prev is not None:
self.setCurrentSelection(prev)
def cancel(self):
self.close(None)
class myChannelSelection(ChannelSelectionBase):
skin = """
<screen name="myChannelSelection" position="center,center" size="560,440" title="Select service to add...">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self["actions"] = ActionMap(["OkCancelActions", "TvRadioActions", "ChannelSelectBaseActions"],
{
"showProviders": self.doNothing,
"showSatellites": self.showAllServices,
"showAllServices": self.cancel,
"cancel": self.cancel,
"ok": self.channelSelected,
"keyRadio": self.setModeRadio,
"keyTV": self.setModeTv
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("All"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Favourites"))
self["introduction"] = StaticText(_("Press OK to select a Provider."))
def __onExecCallback(self):
self.setModeTv()
self.setTitle(_("Select service to add..."))
def doNothing(self):
pass
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
if (ref.flags & 7) == 7:
self.enterPath(ref)
elif not (ref.flags & eServiceReference.isMarker):
ref = self.getCurrentSelection()
self.close(ref)
def setModeTv(self):
self.setTvMode()
self.showFavourites()
def setModeRadio(self):
self.setRadioMode()
self.showFavourites()
def cancel(self):
self.close(None)
def activate_all(session):
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Activate] FOUND %d CI Slots " % NUM_CI
if NUM_CI > 0:
ci_config=[]
def getValue(definitions, default):
# Initialize Output
ret = ""
# How many definitions are present
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
for ci in range(NUM_CI):
filename="/etc/enigma2/ci"+str(ci)+".xml"
if not os_path.exists(filename):
print "[CI_Activate_Config_CI%d] no config file found" %ci
try:
tree = ci_parse(filename).getroot()
read_services=[]
read_providers=[]
usingcaid=[]
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
usingcaid.append(long(read_caid,16))
for service in slot.findall("service"):
read_service_ref = service.get("ref").encode("UTF-8")
read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
read_providers.append((read_provider_name,long(read_provider_dvbname,16)))
ci_config.append((int(read_slot), (read_services, read_providers, usingcaid)))
except:
print "[CI_Activate_Config_CI%d] error parsing xml..." %ci
for item in ci_config:
print "[CI_Activate] activate CI%d with following settings:" %item[0]
print item[0]
print item[1]
try:
eDVBCIInterfaces.getInstance().setDescrambleRules(item[0],item[1])
except:
print "[CI_Activate_Config_CI%d] error setting DescrambleRules..." %item[0]
def find_in_list(list, search, listpos=0):
for item in list:
if item[listpos]==search:
return True
return False
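# --- Editor's example (not part of the original plugin): find_in_list() checks
# whether any tuple in `list` carries `search` at index `listpos`, e.g.
#     find_in_list([("0x100", "256", 0)], "0x100", 0)  -> True
#     find_in_list([("0x100", "256", 0)], "0x500", 0)  -> False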
global_session = None
def sessionstart(reason, session):
global global_session
global_session = session
def autostart(reason, **kwargs):
global global_session
if reason == 0:
print "[CI_Assignment] activating ci configs:"
activate_all(global_session)
elif reason == 1:
global_session = None
def main(session, **kwargs):
session.open(CIselectMainMenu)
def menu(menuid, **kwargs):
if menuid == "setup" and eDVBCIInterfaces.getInstance().getNumOfSlots():
return [(_("Common Interface Assignment"), main, "ci_assign", 11)]
return [ ]
def Plugins(**kwargs):
if config.usage.setup_level.index > 1:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, fnc = autostart ),
PluginDescriptor( name = "CommonInterfaceAssignment", description = _("a gui to assign services/providers/caids to common interface modules"), where = PluginDescriptor.WHERE_MENU, fnc = menu )]
else:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, fnc = autostart ),
PluginDescriptor( name = "CommonInterfaceAssignment", description = _("a gui to assign services/providers to common interface modules"), where = PluginDescriptor.WHERE_MENU, fnc = menu )]
| gpl-2.0 | 3,737,866,505,738,427,400 | 39.57473 | 197 | 0.682793 | false |
octocoin-project/octocoin | share/seeds/generate-seeds.py | 2 | 4298 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
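# --- Editor's example (not in the original script): what the helpers above
# return. The default port (22889) mirrors the value used for pnSeed6_main
# further down; the literal addresses are placeholders.
def _example_parse_spec():
    host, port = parse_spec('1.2.3.4', 22889)
    # host == pchIPv4 + bytearray([1, 2, 3, 4]), i.e. an IPv4-mapped IPv6 address
    assert port == 22889
    host6, port6 = parse_spec('[2001:db8::1]:1234', 22889)
    # host6 is the packed 16-byte IPv6 address, port6 == 1234
    return host, host6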
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
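# --- Editor's note (illustrative): for the IPv4 example above, process_nodes()
# emits one C initializer line of the following shape into the generated header:
#     {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 22889}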
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 22889)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 32889)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
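# --- Editor's note (illustrative invocation; paths are placeholders):
#     python generate-seeds.py ../seeds > ../../src/chainparamsseeds.h
# The directory passed in must contain nodes_main.txt and nodes_test.txt as
# described in the module docstring.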
| mit | 1,106,258,131,989,535,100 | 30.837037 | 98 | 0.576082 | false |
afaheem88/tempest_neutron | tempest/api/baremetal/admin/test_nodestates.py | 8 | 2363 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.baremetal.admin import base
from tempest import exceptions
from tempest.openstack.common import timeutils
from tempest import test
class TestNodeStates(base.BaseBaremetalTest):
"""Tests for baremetal NodeStates."""
@classmethod
def resource_setup(cls):
super(TestNodeStates, cls).resource_setup()
_, cls.chassis = cls.create_chassis()
_, cls.node = cls.create_node(cls.chassis['uuid'])
def _validate_power_state(self, node_uuid, power_state):
# Validate that power state is set within timeout
if power_state == 'rebooting':
power_state = 'power on'
start = timeutils.utcnow()
while timeutils.delta_seconds(
start, timeutils.utcnow()) < self.power_timeout:
_, node = self.client.show_node(node_uuid)
if node['power_state'] == power_state:
return
message = ('Failed to set power state within '
'the required time: %s sec.' % self.power_timeout)
raise exceptions.TimeoutException(message)
@test.attr(type='smoke')
def test_list_nodestates(self):
_, nodestates = self.client.list_nodestates(self.node['uuid'])
for key in nodestates:
self.assertEqual(nodestates[key], self.node[key])
@test.attr(type='smoke')
def test_set_node_power_state(self):
_, node = self.create_node(self.chassis['uuid'])
states = ["power on", "rebooting", "power off"]
for state in states:
# Set power state
self.client.set_node_power_state(node['uuid'], state)
# Check power state after state is set
self._validate_power_state(node['uuid'], state)
| apache-2.0 | 8,783,356,742,742,261,000 | 39.741379 | 78 | 0.650868 | false |
sigwo/simplecoin_multi | manage.py | 3 | 10705 | import argparse
import json
import os
import datetime
from simplecoin import create_manage_app, db, currencies, powerpools, redis_conn
from simplecoin.scheduler import SchedulerCommand
from simplecoin.models import (Transaction, UserSettings, Credit, ShareSlice,
DeviceSlice, Block, CreditExchange)
from urlparse import urlparse
from flask import current_app, _request_ctx_stack
from flask.ext.migrate import stamp
from flask.ext.script import Manager, Shell, Server
from flask.ext.migrate import MigrateCommand
manager = Manager(create_manage_app)
@manager.option('-e', '--emit', help='prints the SQL that is executed',
action="store_true")
def init_db(emit=False):
""" Resets entire database to empty state """
if emit:
import logging
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
res = raw_input("You shouldn't probably ever do this in production! Are you"
" really, really sure you want to reset the DB {}? [y/n] "
.format(db.engine))
if res != "y":
return
else:
db.session.commit()
db.drop_all()
db.create_all()
stamp()
@manager.command
def list_donation_perc():
""" Gives a summary of number of users at each donation amount """
summ = {}
warn = False
for entry in UserSettings.query.all():
summ.setdefault(entry.pdonation_perc, 0)
summ[entry.pdonation_perc] += 1
if entry.pdonation_perc < 0:
warn = True
if warn:
print("WARNING: A user has set a donation percentage below 0!")
print "User fee summary"
print "\n".join(["{0:.2f}% donation from {1} users"
.format(k * 100, v) for k, v in sorted(summ.items())])
@manager.option("--currency", type=str, dest="currency", default=None)
@manager.option('stop_id', type=int)
@manager.option('start_id', type=int)
def del_payouts(start_id, stop_id, currency=None):
"""
Deletes payouts between start and stop id and removes their id from the
associated Credits.
Expects a start and stop payout id for payouts to be deleted
If currency is passed, only payout matching that currency will be removed
::Warning:: This can really fuck things up!
"""
from simplecoin.models import Payout
payouts = Payout.query.filter(Payout.id >= start_id,
Payout.id <= stop_id).all()
if currency is not None:
payouts = [payout for payout in payouts if payout.currency == currency]
pids = [payout.id for payout in payouts]
credits = Credit.query.filter(Credit.payout_id.in_(pids)).all()
for credit in credits:
credit.payout = None
if credit.block and credit.block.orphan:
credit.payable = False
db.session.flush()
for payout in payouts:
print "ID: {} ### USER: {} ### CREATED: {} ### AMOUNT: {} ### " \
"CREDIT_COUNT: {}".format(payout.id, payout.user,
payout.created_at, payout.amount,
payout.count)
db.session.delete(payout)
print "Preparing to delete {} Payouts.".format(len(pids))
res = raw_input("Are you really sure you want to delete these payouts? [y/n] ")
if res != "y":
db.session.rollback()
return
db.session.commit()
@manager.option("currency", type=str)
def update_trade_requests(currency):
"""
Looks at all uncompleted sell requests for a currency and
re-looks at their credits.
A Trade request's credits should always be payable - but this can
get messed up if there is a long chain of orphans that is only later
discovered (after the trade request is generated). This can happen from
a daemon being on the wrong fork for a while, and then switching to the
'official' fork.
It is important that this function be run AFTER running update_block_state
and del_payouts, which check the maturity of blocks & removes old incorrect
payouts
If any credits in the TR are discovered to now not be payable then subtract
that credit amount from the TR.
"""
from simplecoin.models import TradeRequest
trs = TradeRequest.query.filter_by(_status=0, currency=currency,
type="sell").all()
adjustment = {}
for tr in trs:
for credit in tr.credits[:]:
if credit.payable is False:
print "Found unpayable credit for {} {} on TR #{}".format(credit.amount, credit.currency, tr.id)
tr.quantity -= credit.amount
tr.credits.remove(credit)
adjustment.setdefault(tr.id, 0)
adjustment[tr.id] -= credit.amount
if adjustment:
print "Preparing to update TRs: {}.".format(adjustment)
else:
print "Nothing to update...exiting."
exit(0)
res = raw_input("Are you really sure you want to perform this update? [y/n] ")
if res != "y" and res != "yes":
db.session.rollback()
return
db.session.commit()
@manager.option('input', type=argparse.FileType('r'))
def import_shares(input):
for i, line in enumerate(input):
data = json.loads(line)
data['time'] = datetime.datetime.utcfromtimestamp(data['time'])
slc = ShareSlice(algo="scrypt", **data)
floored = DeviceSlice.floor_time(data['time'], data['span'])
if data['time'] != floored:
current_app.logger.warn("{} != {}".format(data['time'], floored))
data['time'] = floored
db.session.add(slc)
if i % 100 == 0:
print "{} completed".format(i)
db.session.commit()
@manager.option('input', type=argparse.FileType('r'))
def import_device_slices(input):
for i, row in enumerate(input):
data = json.loads(row)
data['time'] = datetime.datetime.utcfromtimestamp(data['time'])
data['stat'] = data.pop('_stat')
# Do a basic integrity check
floored = DeviceSlice.floor_time(data['time'], data['span'])
if data['time'] != floored:
current_app.logger.warn("{} != {}".format(data['time'], floored))
data['time'] = floored
db.session.add(DeviceSlice(**data))
# Print periodic progress
if i % 100 == 0:
db.session.commit()
print("{} inserted!".format(i))
@manager.command
def dump_effective_config():
import pprint
pprint.pprint(dict(current_app.config))
@manager.option('host')
def forward_coinservs(host):
""" Given a hostname, connects to a remote and tunnels all coinserver ports
to local ports. Useful for development testing. """
args = [host, "-N"]
for currency in currencies.itervalues():
if not currency.coinserv:
continue
args.append("-L {0}:127.0.0.1:{0}"
.format(currency.coinserv.config['port']))
for pp in powerpools.itervalues():
parts = urlparse(pp.monitor_address)
if parts.hostname not in ['localhost', '127.0.0.1']:
continue
args.append("-L {0}:127.0.0.1:{0}".format(parts.port))
current_app.logger.info(("/usr/bin/ssh", "/usr/bin/ssh", args))
os.execl("/usr/bin/ssh", "/usr/bin/ssh", *args)
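# --- Editor's note (illustrative): with two local coinservers configured on
# ports 19332 and 19444 (placeholder values), the exec above amounts to running:
#     /usr/bin/ssh <host> -N -L 19332:127.0.0.1:19332 -L 19444:127.0.0.1:19444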
@manager.option('-ds', '--dont-simulate', default=False,
action="store_true")
def convert_unexchangeable(dont_simulate):
""" Converts Credit exchanges for unexchangeable currencies to payout the
pool.
XXX: Now broken due to config refactor """
unexchangeable = []
for currency in currencies.itervalues():
# Skip unused currencies
if not currency.coinserv:
continue
if not currency.exchangeable:
unexchangeable.append((currency.key, currency.pool_payout))
current_app.logger.info("Looking for CreditExchange's for currencies {}"
.format(unexchangeable))
for key, pool_payout in unexchangeable:
blocks = {}
hashes = set()
for ce in (CreditExchange.query.join(CreditExchange.block, aliased=True).
filter_by(currency=key)):
blocks.setdefault(ce.block, [0, []])
hashes.add(ce.block.hash)
blocks[ce.block][0] += ce.amount
blocks[ce.block][1].append(ce)
db.session.delete(ce)
# Sanity check, make sure block objs as keys is valid
assert len(hashes) == len(blocks)
for block, (amount, credits) in blocks.iteritems():
# Create a new credit for the pool to displace the deleted
# CreditExchanges. It will always be a credit since the currency is
# unexchangeable
pool_block = Credit(
source=0,
address=pool_payout['address'],
user=pool_payout['user'],
currency=pool_payout['currency'].key,
amount=amount,
block_id=block.id,
payable=block.mature)
db.session.add(pool_block)
current_app.logger.info(
"Block {} status {} value {} removed {} CreditExchanges of {} total amount"
.format(block, block.status, block.total_value, len(credits), amount))
current_app.logger.info("For currency {}, updated {} blocks"
.format(key, len(blocks)))
if dont_simulate is True:
current_app.logger.info("Committing transaction!")
db.session.commit()
else:
current_app.logger.info("Rolling back!")
db.session.rollback()
@manager.option('-t', '--txid', dest='transaction_id')
def confirm_trans(transaction_id):
""" Manually confirms a transaction. Shouldn't be needed in normal use. """
trans = Transaction.query.filter_by(txid=transaction_id).first()
trans.confirmed = True
db.session.commit()
def make_context():
""" Setup a coinserver connection fot the shell context """
app = _request_ctx_stack.top.app
import simplecoin.models as m
return dict(app=app, currencies=currencies, powerpools=powerpools, m=m, db=db)
manager.add_command("shell", Shell(make_context=make_context))
manager.add_command("runserver", Server())
manager.add_command('db', MigrateCommand)
manager.add_command('scheduler', SchedulerCommand)
manager.add_option('-c', '--config', dest='configs', action='append',
type=argparse.FileType('r'))
manager.add_option('-l', '--log-level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR'], default='INFO')
if __name__ == "__main__":
manager.run()
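# --- Editor's note (illustrative invocations; the config file name is a
# placeholder). Commands are exposed through the Flask-Script Manager above:
#     python manage.py -c config.json runserver
#     python manage.py -c config.json list_donation_perc
#     python manage.py -c config.json init_db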
| mit | -1,270,621,371,326,269,000 | 33.98366 | 112 | 0.611397 | false |
eeshangarg/oh-mainline | vendor/packages/twisted/twisted/words/xish/xmlstream.py | 18 | 8526 | # -*- test-case-name: twisted.words.test.test_xmlstream -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XML Stream processing.
An XML Stream is defined as a connection over which two XML documents are
exchanged during the lifetime of the connection, one for each direction. The
unit of interaction is a direct child element of the root element (stanza).
The most prominent use of XML Streams is Jabber, but this module is generically
usable. See Twisted Words for Jabber specific protocol support.
Maintainer: Ralph Meijer
"""
from twisted.python import failure
from twisted.internet import protocol
from twisted.words.xish import domish, utility
STREAM_CONNECTED_EVENT = intern("//event/stream/connected")
STREAM_START_EVENT = intern("//event/stream/start")
STREAM_END_EVENT = intern("//event/stream/end")
STREAM_ERROR_EVENT = intern("//event/stream/error")
class XmlStream(protocol.Protocol, utility.EventDispatcher):
""" Generic Streaming XML protocol handler.
This protocol handler will parse incoming data as XML and dispatch events
accordingly. Incoming stanzas can be handled by registering observers using
XPath-like expressions that are matched against each stanza. See
L{utility.EventDispatcher} for details.
"""
def __init__(self):
utility.EventDispatcher.__init__(self)
self.stream = None
self.rawDataOutFn = None
self.rawDataInFn = None
def _initializeStream(self):
""" Sets up XML Parser. """
self.stream = domish.elementStream()
self.stream.DocumentStartEvent = self.onDocumentStart
self.stream.ElementEvent = self.onElement
self.stream.DocumentEndEvent = self.onDocumentEnd
### --------------------------------------------------------------
###
### Protocol events
###
### --------------------------------------------------------------
def connectionMade(self):
""" Called when a connection is made.
Sets up the XML parser and dispatches the L{STREAM_CONNECTED_EVENT}
event indicating the connection has been established.
"""
self._initializeStream()
self.dispatch(self, STREAM_CONNECTED_EVENT)
def dataReceived(self, data):
""" Called whenever data is received.
Passes the data to the XML parser. This can result in calls to the
DOM handlers. If a parse error occurs, the L{STREAM_ERROR_EVENT} event
is called to allow for cleanup actions, followed by dropping the
connection.
"""
try:
if self.rawDataInFn:
self.rawDataInFn(data)
self.stream.parse(data)
except domish.ParserError:
self.dispatch(failure.Failure(), STREAM_ERROR_EVENT)
self.transport.loseConnection()
def connectionLost(self, reason):
""" Called when the connection is shut down.
Dispatches the L{STREAM_END_EVENT}.
"""
self.dispatch(reason, STREAM_END_EVENT)
self.stream = None
### --------------------------------------------------------------
###
### DOM events
###
### --------------------------------------------------------------
def onDocumentStart(self, rootElement):
""" Called whenever the start tag of a root element has been received.
Dispatches the L{STREAM_START_EVENT}.
"""
self.dispatch(self, STREAM_START_EVENT)
def onElement(self, element):
""" Called whenever a direct child element of the root element has
been received.
Dispatches the received element.
"""
self.dispatch(element)
def onDocumentEnd(self):
""" Called whenever the end tag of the root element has been received.
Closes the connection. This causes C{connectionLost} being called.
"""
self.transport.loseConnection()
def setDispatchFn(self, fn):
""" Set another function to handle elements. """
self.stream.ElementEvent = fn
def resetDispatchFn(self):
""" Set the default function (C{onElement}) to handle elements. """
self.stream.ElementEvent = self.onElement
def send(self, obj):
""" Send data over the stream.
Sends the given C{obj} over the connection. C{obj} may be instances of
L{domish.Element}, L{unicode} and L{str}. The first two will be
properly serialized and/or encoded. L{str} objects must be in UTF-8
encoding.
Note: because it is easy to make mistakes in maintaining a properly
encoded L{str} object, it is advised to use L{unicode} objects
everywhere when dealing with XML Streams.
@param obj: Object to be sent over the stream.
@type obj: L{domish.Element}, L{domish} or L{str}
"""
if domish.IElement.providedBy(obj):
obj = obj.toXml()
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
if self.rawDataOutFn:
self.rawDataOutFn(obj)
self.transport.write(obj)
class BootstrapMixin(object):
"""
XmlStream factory mixin to install bootstrap event observers.
This mixin is for factories providing
L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>} to make
sure bootstrap event observers are set up on protocols, before incoming
data is processed. Such protocols typically derive from
L{utility.EventDispatcher}, like L{XmlStream}.
You can set up bootstrap event observers using C{addBootstrap}. The
C{event} and C{fn} parameters correspond with the C{event} and
C{observerfn} arguments to L{utility.EventDispatcher.addObserver}.
@since: 8.2.
@ivar bootstraps: The list of registered bootstrap event observers.
@type bootstrap: C{list}
"""
def __init__(self):
self.bootstraps = []
def installBootstraps(self, dispatcher):
"""
Install registered bootstrap observers.
@param dispatcher: Event dispatcher to add the observers to.
@type dispatcher: L{utility.EventDispatcher}
"""
for event, fn in self.bootstraps:
dispatcher.addObserver(event, fn)
def addBootstrap(self, event, fn):
"""
Add a bootstrap event handler.
@param event: The event to register an observer for.
@type event: C{str} or L{xpath.XPathQuery}
@param fn: The observer callable to be registered.
"""
self.bootstraps.append((event, fn))
def removeBootstrap(self, event, fn):
"""
Remove a bootstrap event handler.
@param event: The event the observer is registered for.
@type event: C{str} or L{xpath.XPathQuery}
@param fn: The registered observer callable.
"""
self.bootstraps.remove((event, fn))
class XmlStreamFactoryMixin(BootstrapMixin):
"""
XmlStream factory mixin that takes care of event handlers.
All positional and keyword arguments passed to create this factory are
passed on as-is to the protocol.
@ivar args: Positional arguments passed to the protocol upon instantiation.
@type args: C{tuple}.
@ivar kwargs: Keyword arguments passed to the protocol upon instantiation.
@type kwargs: C{dict}.
"""
def __init__(self, *args, **kwargs):
BootstrapMixin.__init__(self)
self.args = args
self.kwargs = kwargs
def buildProtocol(self, addr):
"""
Create an instance of XmlStream.
The returned instance will have bootstrap event observers registered
and will proceed to handle input on an incoming connection.
"""
xs = self.protocol(*self.args, **self.kwargs)
xs.factory = self
self.installBootstraps(xs)
return xs
class XmlStreamFactory(XmlStreamFactoryMixin,
protocol.ReconnectingClientFactory):
"""
Factory for XmlStream protocol objects as a reconnection client.
"""
protocol = XmlStream
def buildProtocol(self, addr):
"""
Create a protocol instance.
Overrides L{XmlStreamFactoryMixin.buildProtocol} to work with
a L{ReconnectingClientFactory}. As this is called upon having an
connection established, we are resetting the delay for reconnection
attempts when the connection is lost again.
"""
self.resetDelay()
return XmlStreamFactoryMixin.buildProtocol(self, addr)
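# --- Editor's sketch (not part of the original module): minimal wiring of the
# factory above. The observer body and the XML payload are illustrative only.
def _exampleFactory():
    def onStreamStart(xs):
        # xs is the XmlStream instance dispatched with STREAM_START_EVENT
        xs.send("<presence/>")
    factory = XmlStreamFactory()
    factory.addBootstrap(STREAM_START_EVENT, onStreamStart)
    # The factory can then be handed to a connector, e.g. reactor.connectTCP(host, port, factory).
    return factory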
| agpl-3.0 | 2,892,796,734,866,596,000 | 31.666667 | 79 | 0.638635 | false |
temetherian/omnomic | comm.py | 1 | 3762 | #!/usr/bin/python
import base64
import json
import os
import re
import time
import uuid
from google.appengine.api import app_identity
from google.appengine.ext import ndb
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
import httplib2
from oauth2client.client import GoogleCredentials
_FIREBASE_SCOPES = [
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email']
_FIREBASE_CONFIG = '_firebase_config.html'
_IDENTITY_ENDPOINT = ('https://identitytoolkit.googleapis.com/'
'google.identity.identitytoolkit.v1.IdentityToolkit')
# Memoize the authorized http, to avoid fetching new access tokens
@lru_cache()
def _get_http():
"""Provides an authed http object."""
http = httplib2.Http()
# Use application default credentials to make the Firebase calls
# https://firebase.google.com/docs/reference/rest/database/user-auth
creds = GoogleCredentials.get_application_default().create_scoped(
_FIREBASE_SCOPES)
creds.authorize(http)
return http
@lru_cache()
def _get_firebase_db_url():
"""Grabs the databaseURL from the Firebase config snippet. Regex looks
scary, but all it is doing is pulling the 'databaseURL' field from the
Firebase javascript snippet"""
regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)')
cwd = os.path.dirname(__file__)
try:
with open(os.path.join(cwd, 'templates', _FIREBASE_CONFIG)) as f:
url = next(regex.search(line) for line in f if regex.search(line))
except StopIteration:
raise ValueError(
'Error parsing databaseURL. Please copy Firebase web snippet '
'into templates/{}'.format(_FIREBASE_CONFIG))
return url.group(1)
def create_custom_token(channel_id, valid_minutes=60):
"""Create a secure token for the given id.
This method is used to create secure custom JWT tokens to be passed to
clients. It takes a unique id (uid) that will be used by Firebase's
security rules to prevent unauthorized access. In this case, the uid will
be the channel id which is a combination of user_id and game_key
"""
# use the app_identity service from google.appengine.api to get the
# project's service account email automatically
client_email = app_identity.get_service_account_name()
now = int(time.time())
# encode the required claims
# per https://firebase.google.com/docs/auth/server/create-custom-tokens
payload = base64.b64encode(json.dumps({
'iss': client_email,
'sub': client_email,
'aud': _IDENTITY_ENDPOINT,
'uid': channel_id,
'iat': now,
'exp': now + (valid_minutes * 60),
}))
# add standard header to identify this as a JWT
header = base64.b64encode(json.dumps({'typ': 'JWT', 'alg': 'RS256'}))
to_sign = '{}.{}'.format(header, payload)
# Sign the jwt using the built in app_identity service
return '{}.{}'.format(to_sign, base64.b64encode(
app_identity.sign_blob(to_sign)[1]))
def MaybeSendToChannel(channel_id, msg):
"""Send to channel if it exists, otherwise ignore it."""
if not channel_id:
return
url = '{}/channels/{}.json'.format(_get_firebase_db_url(), channel_id)
return _get_http().request(url, 'PUT', body=msg)
def DeleteChannel(channel_id):
"""Send to channel if it exists, otherwise ignore it."""
if not channel_id:
return
url = '{}/channels/{}.json'.format(_get_firebase_db_url(), channel_id)
return _get_http().request(url, 'DELETE')
#############
# Datastore #
#############
class PlayerClient(ndb.Model):
player_id = ndb.StringProperty()
auth_token = ndb.StringProperty(indexed=False)
channel = ndb.StringProperty(indexed=False)
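# --- Editor's sketch (not part of the original module): how the helpers above
# fit together. The channel id layout and the message payload are assumptions;
# the docstring only says the uid combines user_id and game_key.
def _example_channel_flow(user_id, game_key):
  channel_id = '%s-%s' % (user_id, game_key)
  token = create_custom_token(channel_id)            # JWT handed to the client
  MaybeSendToChannel(channel_id, json.dumps({'state': 'your_turn'}))
  DeleteChannel(channel_id)                          # tear down when finished
  return token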
| gpl-3.0 | 3,970,345,660,071,770,600 | 31.713043 | 78 | 0.675439 | false |
dhcrzf/zulip | zerver/lib/widget.py | 2 | 2238 | from typing import MutableMapping, Any, Optional, List, Tuple
from django.conf import settings
import re
import json
from zerver.models import SubMessage
def get_widget_data(content: str) -> Tuple[Optional[str], Optional[str]]:
valid_widget_types = ['tictactoe', 'poll', 'todo']
tokens = content.split(' ')
# tokens[0] will always exist
if tokens[0].startswith('/'):
widget_type = tokens[0][1:]
if widget_type in valid_widget_types:
extra_data = get_extra_data_from_widget_type(tokens, widget_type)
return widget_type, extra_data
return None, None
def get_extra_data_from_widget_type(tokens: List[str],
widget_type: Optional[str]) -> Any:
if widget_type == 'poll':
# This is used to extract the question from the poll command.
# The command '/poll question' will pre-set the question in the poll
question = ' '.join(tokens[1:])
if not question:
question = ''
extra_data = {'question': question}
return extra_data
return None
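# --- Editor's example (not part of the original module): how message content
# maps to a widget through the helpers above.
def _example_widget_parsing() -> None:
    assert get_widget_data('/poll Lunch options?') == ('poll', {'question': 'Lunch options?'})
    assert get_widget_data('/todo') == ('todo', None)
    assert get_widget_data('hello world') == (None, None)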
def do_widget_post_save_actions(message: MutableMapping[str, Any]) -> None:
'''
This is experimental code that only works with the
webapp for now.
'''
if not settings.ALLOW_SUB_MESSAGES:
return
content = message['message'].content
sender_id = message['message'].sender_id
message_id = message['message'].id
widget_type = None
extra_data = None
widget_type, extra_data = get_widget_data(content)
widget_content = message.get('widget_content')
if widget_content is not None:
# Note that we validate this data in check_message,
# so we can trust it here.
widget_type = widget_content['widget_type']
extra_data = widget_content['extra_data']
if widget_type:
content = dict(
widget_type=widget_type,
extra_data=extra_data
)
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type='widget',
content=json.dumps(content),
)
submessage.save()
message['submessages'] = SubMessage.get_raw_db_rows([message_id])
| apache-2.0 | 5,613,467,135,951,827,000 | 31.434783 | 77 | 0.613941 | false |
PaddlePaddle/Paddle | python/paddle/distributed/fleet/base/distributed_strategy.py | 1 | 62903 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.distributed.fleet.proto import distributed_strategy_pb2
from paddle.fluid.framework import Variable, set_flags, core, _global_flags
from paddle.fluid.wrapped_decorator import wrap_decorator
import google.protobuf.text_format
import google.protobuf
__all__ = []
non_auto_func_called = True
def __non_auto_func_called__(func):
def __impl__(*args, **kwargs):
global non_auto_func_called
non_auto_func_called = False
return func(*args, **kwargs)
return __impl__
is_strict_auto = wrap_decorator(__non_auto_func_called__)
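# Note (added commentary): every setter wrapped with @is_strict_auto flips the
# module-level `non_auto_func_called` flag to False. DistributedStrategy's
# _is_strict_auto() later checks that flag to detect whether the user configured
# anything besides `auto`; if so, fully-automatic strategy selection is not applied.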
def get_msg_dict(msg):
res_dict = {}
fields = msg.DESCRIPTOR.fields
for f in fields:
res_dict[f.name] = getattr(msg, f.name)
return res_dict
def assign_configs_value(msg, config):
fields = msg.DESCRIPTOR.fields
for key in config:
for f in fields:
if key == f.name:
# LABEL_OPTIONAL = 1
# LABEL_REPEATED = 3
# LABEL_REQUIRED = 2
if f.label == 3:
getattr(msg, f.name).extend(config[f.name])
elif f.label == 1 or f.label == 2:
setattr(msg, f.name, config[f.name])
def check_configs_key(msg, config, field_name):
key_list = msg.DESCRIPTOR.fields_by_name.keys()
for key in config:
assert key in key_list, "key:{} not in {}".format(key, field_name)
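# Illustrative sketch of how the three helpers above cooperate when a user
# assigns e.g. `strategy.amp_configs = {"init_loss_scaling": 1024}` (the
# concrete field names here are just examples):
#
#   msg = strategy.strategy.amp_configs             # a protobuf message
#   check_configs_key(msg, configs, "amp_configs")  # reject unknown keys
#   assign_configs_value(msg, configs)              # copy values into the protobuf
#   get_msg_dict(msg)                               # read them back as a plain dict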
class DistributedJobInfo(object):
"""
DistributedJobInfo will serialize all distributed training information
Just for inner use: 1) debug 2) replicate experiments
"""
def __init__(self):
self.job_info = distributed_strategy_pb2.DistributedJobInfo()
def _set_worker_num(self, worker_num):
self.job_info.worker_num = worker_num
def _set_server_num(self, server_num):
self.job_info.server_num = server_num
def _set_worker_ips(self, worker_ips):
self.job_info.worker_ips.extend(worker_ips)
def _set_server_endpoints(self, server_endpoints):
self.job_info.server_endpoints.extend(server_endpoints)
def _set_origin_startup(self, origin_startup_prog):
self.job_info.origin_startup = str(origin_startup_prog)
def _set_origin_main(self, origin_main_prog):
self.job_info.origin_main = str(origin_main_prog)
def _distributed_main(self, distributed_main_prog):
self.job_info.distributed_main = str(distributed_main_prog)
def _optimizer_name(self, optimizer_name):
self.job_info.optimizer_name = optimizer_name
def _set_distributed_strategy(self, dist_strategy):
self.job_info.strategy = dist_strategy
class DistributedStrategy(object):
__lock_attr = False
def __init__(self):
"""
DistributedStrategy is the main configuration entry for distributed training of Paddle.
All of the distributed training configurations can be configured in DistributedStrategy,
such as automatic mixed precision (AMP), Layer-wise Adaptive Rate Scaling (LARS),
asynchronous update parameter server(ASGD), etc.
DistributedStrategy can be serialized into protobuf file or deserialized from protobuf file
Users who run local training usually configure BuildStrategy and ExecutionStrategy, and
DistributedStrategy supports configurations from BuildStrategy and ExecutionStrategy
"""
self.strategy = distributed_strategy_pb2.DistributedStrategy()
# Set the default values of the following flags to the ones set by users
key = 'FLAGS_cudnn_batchnorm_spatial_persistent'
if _global_flags().is_public(key):
self.strategy.cudnn_batchnorm_spatial_persistent = bool(
_global_flags()[key])
key = 'FLAGS_conv_workspace_size_limit'
if _global_flags().is_public(key):
self.strategy.conv_workspace_size_limit = int(_global_flags()[key])
key = 'FLAGS_cudnn_exhaustive_search'
if _global_flags().is_public(key):
self.strategy.cudnn_exhaustive_search = bool(_global_flags()[key])
key = 'FLAGS_sync_nccl_allreduce'
if _global_flags().is_public(key):
self.strategy.sync_nccl_allreduce = bool(_global_flags()[key])
self.__lock_attr = True
def __setattr__(self, key, value):
if self.__lock_attr and not hasattr(self, key):
raise TypeError("%s is not a attribute of %s" %
(key, self.__class__.__name__))
object.__setattr__(self, key, value)
def save_to_prototxt(self, output):
"""
Serialize current DistributedStrategy to string and save to output file
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True
strategy.recompute = True
strategy.recompute_configs = {"checkpoints": ["x"]}
strategy.save_to_prototxt("dist_strategy.prototxt")
"""
with open(output, "w") as fout:
fout.write(str(self.strategy))
def load_from_prototxt(self, pb_file):
"""
Load from prototxt file for DistributedStrategy initialization
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.load_from_prototxt("dist_strategy.prototxt")
"""
with open(pb_file, 'r') as f:
self.strategy = google.protobuf.text_format.Merge(
str(f.read()), self.strategy)
@property
def execution_strategy(self):
"""
Configure ExecutionStrategy for DistributedStrategy
Examples:
.. code-block:: python
import paddle
exe_strategy = paddle.static.ExecutionStrategy()
exe_strategy.num_threads = 10
exe_strategy.num_iteration_per_drop_scope = 10
exe_strategy.num_iteration_per_run = 10
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.execution_strategy = exe_strategy
"""
execution_strategy = paddle.fluid.ExecutionStrategy()
fields = self.strategy.execution_strategy.DESCRIPTOR.fields
for f in fields:
setattr(execution_strategy, f.name,
getattr(self.strategy.execution_strategy, f.name))
return execution_strategy
@execution_strategy.setter
@is_strict_auto
def execution_strategy(self, strategy):
fields = self.strategy.execution_strategy.DESCRIPTOR.fields
for f in fields:
setattr(self.strategy.execution_strategy, f.name,
getattr(strategy, f.name))
@property
def build_strategy(self):
"""
Configure BuildStrategy for DistributedStrategy
Note that the properties of BuildStrategy are valid in DistributedStrategy
        only if the property is a non-distributed strategy.
Examples:
.. code-block:: python
import paddle
build_strategy = paddle.static.BuildStrategy()
build_strategy.enable_sequential_execution = True
build_strategy.fuse_elewise_add_act_ops = True
build_strategy.fuse_bn_act_ops = True
build_strategy.enable_auto_fusion = True
build_strategy.fuse_relu_depthwise_conv = True
build_strategy.fuse_broadcast_ops = True
build_strategy.fuse_all_optimizer_ops = True
build_strategy.enable_inplace = True
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.build_strategy = build_strategy
"""
build_strategy = paddle.fluid.BuildStrategy()
fields = self.strategy.build_strategy.DESCRIPTOR.fields
for f in fields:
setattr(build_strategy, f.name,
getattr(self.strategy.build_strategy, f.name))
return build_strategy
@build_strategy.setter
@is_strict_auto
def build_strategy(self, strategy):
fields = self.strategy.build_strategy.DESCRIPTOR.fields
for f in fields:
if f.label == 1 or f.label == 2: # optional and required field
setattr(self.strategy.build_strategy, f.name,
getattr(strategy, f.name))
elif f.label == 3: # repeated field
getattr(self.strategy.build_strategy,
f.name).extend(getattr(strategy, f.name))
@property
def gradient_scale_configs(self):
"""
Set the strategy of gradient scale
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_scale_configs = {'scale_strategy': 'avg'}
Note that, strategy must be in 'avg', 'sum' or 'customized'
"""
return get_msg_dict(self.strategy.gradient_scale_configs)
@gradient_scale_configs.setter
@is_strict_auto
def gradient_scale_configs(self, config):
check_configs_key(self.strategy.gradient_scale_configs, config,
'gradient_scale_configs')
assign_configs_value(self.strategy.gradient_scale_configs, config)
@property
def a_sync(self):
"""
        Indicating whether we are using asynchronous stochastic gradient descent updates
        for training. This property is valid when we are using parameter server training,
        which is implied by setting an appropriate RoleMaker.
Default value: True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
role_maker = fleet.PaddleCloudRoleMaker()
fleet.init(role_maker)
strategy = fleet.DistributedStrategy()
strategy.a_sync = True # by default this is True
# code block for defining loss and local optimizer
# sgd = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.a_sync
@a_sync.setter
@is_strict_auto
def a_sync(self, flag):
if isinstance(flag, bool):
self.strategy.a_sync = flag
self.a_sync_configs = {"k_steps": 0}
else:
raise ValueError(
"The type of `flag` is invalid, expected type is bool, but received {}".
format(type(flag)))
@property
def a_sync_configs(self):
"""
Set a_sync update configurations. In general, asynchronous parameter server
        training has several configurable settings that can be configured through
a dict.
**Notes**:
            k_steps(int): number of local optimization updates before communication
max_merge_var_num(int): maximum number of merged gradients before communication
send_queue_size(int): a buffer size of worker communication
independent_recv_thread(bool): if we are using independent recv thread for communication
thread_pool_size(int): number of thread pool
send_wait_times(int): waiting time for sending gradients
runtime_split_send_recv(bool): if we are using Tensor split for send and recv during runtime
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
role_maker = fleet.PaddleCloudRoleMaker()
fleet.init(role_maker)
strategy = fleet.DistributedStrategy()
strategy.a_sync = True # by default this is True
configs = {"k_steps": 1024, "send_queue_size": 32}
strategy.a_sync_configs = configs
# code block for defining loss and local optimizer
# sgd = fleet.distributed_optimizer(optimizer, strategy)
"""
return get_msg_dict(self.strategy.a_sync_configs)
@a_sync_configs.setter
@is_strict_auto
def a_sync_configs(self, configs):
check_configs_key(self.strategy.a_sync_configs, configs,
"a_sync_configs")
assign_configs_value(self.strategy.a_sync_configs, configs)
@property
def amp(self):
"""
Indicating whether we are using automatic mixed precision training
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True # by default this is false
"""
return self.strategy.amp
@amp.setter
@is_strict_auto
def amp(self, flag):
if isinstance(flag, bool):
self.strategy.amp = flag
else:
print("WARNING: amp should have value of bool type")
@property
def amp_configs(self):
"""
        Set automatic mixed precision training configurations. In general, amp has several configurable
settings that can be configured through a dict.
**Notes**:
init_loss_scaling(float): The initial loss scaling factor. Default 32768.
use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling. Default True.
incr_every_n_steps(int): Increases loss scaling every n consecutive steps with finite gradients. Default 1000.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n accumulated steps with nan or inf gradients. Default 2.
incr_ratio(float): The multiplier to use when increasing the loss scaling. Default 2.0.
decr_ratio(float): The less-than-one-multiplier to use when decreasing the loss scaling. Default 0.5.
            custom_white_list(list[str]): Users' custom white list of ops that always execute in fp16.
            custom_black_list(list[str]): Users' custom black list of ops for which fp16 execution is forbidden.
            custom_black_varnames(list[str]): Users' custom black variables' names.
use_pure_fp16(bool): Whether to use the pure fp16 training. Default False.
use_fp16_guard(bool): Whether to use `fp16_guard` when constructing the program.
Default True. Only takes effect when `use_pure_fp16` is turned on.
Examples 1:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {
"init_loss_scaling": 32768,
"custom_white_list": ['conv2d']}
Examples 2:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True
# pure fp16
strategy.amp_configs = {
"init_loss_scaling": 32768,
"use_pure_fp16": True
}
"""
return get_msg_dict(self.strategy.amp_configs)
@amp_configs.setter
@is_strict_auto
def amp_configs(self, configs):
check_configs_key(self.strategy.amp_configs, configs, "amp_configs")
assign_configs_value(self.strategy.amp_configs, configs)
@property
def asp(self):
"""
Indicating whether we are using automatic sparsity training
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.asp = True # by default this is false
"""
return self.strategy.asp
@asp.setter
@is_strict_auto
def asp(self, flag):
if isinstance(flag, bool):
self.strategy.asp = flag
else:
print("WARNING: asp should have value of bool type")
@property
def recompute(self):
"""
Indicating whether we are using forward recomputation for memory optimization
Default value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.recompute = True
# suppose x and y are names of checkpoint tensors for recomputation
strategy.recompute_configs = {"checkpoints": ["x", "y"]}
"""
return self.strategy.recompute
@property
def sync_nccl_allreduce(self):
"""
        Indicating whether we are using synchronized all reduce in each communication thread.
We note that system overhead is usually lower when sync_nccl_allreduce = True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sync_nccl_allreduce = True
"""
return self.strategy.sync_nccl_allreduce
@sync_nccl_allreduce.setter
@is_strict_auto
def sync_nccl_allreduce(self, flag):
if isinstance(flag, bool):
self.strategy.sync_nccl_allreduce = flag
else:
print("WARNING: sync_nccl_allreduce should have value of bool type")
@property
def use_hierarchical_allreduce(self):
"""
Indicating whether we are using hierarchical allreduce in collective communication
        Hierarchical allreduce often does allreduce within a certain node group and then does
        allreduce among the leaders of each group.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.use_hierarchical_allreduce = True
"""
return self.strategy.use_hierarchical_allreduce
@use_hierarchical_allreduce.setter
@is_strict_auto
def use_hierarchical_allreduce(self, flag):
if isinstance(flag, bool):
self.strategy.use_hierarchical_allreduce = flag
else:
print(
"WARNING: use_hierarchical_allreduce should have value of bool type"
)
@property
def hierarchical_allreduce_inter_nranks(self):
"""
Number of ranks for low level node groups in hierarchical allreduce
Default value: number of GPU cards on each single GPU machine
Example:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.hierarchical_allreduce_inter_nranks = 8
"""
return self.strategy.hierarchical_allreduce_inter_nranks
@hierarchical_allreduce_inter_nranks.setter
@is_strict_auto
def hierarchical_allreduce_inter_nranks(self, value):
if isinstance(value, int):
self.strategy.hierarchical_allreduce_inter_nranks = value
else:
print(
"WARNING: hierarchical_allreduce_inter_nranks should have value of int type"
)
@property
def sync_batch_norm(self):
"""
Indicating whether we are using sync_batch_norm to do synchronous batch normalization among all training nodes.
Default value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sync_batch_norm = True
"""
return self.strategy.sync_batch_norm
@sync_batch_norm.setter
@is_strict_auto
def sync_batch_norm(self, flag):
if isinstance(flag, bool):
self.strategy.sync_batch_norm = flag
else:
print("WARNING: sync_batch_norm should have value of bool type")
@property
def fuse_all_reduce_ops(self):
"""
Indicating whether we are using fuse_all_reduce_ops for gradient fusion during backward phase of training
Default value: True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fuse_all_reduce_ops = False
"""
return self.strategy.fuse_all_reduce_ops
@fuse_all_reduce_ops.setter
@is_strict_auto
def fuse_all_reduce_ops(self, flag):
if isinstance(flag, bool):
self.strategy.fuse_all_reduce_ops = flag
else:
print("WARNING: fuse_all_reduce_ops should have value of bool type")
@property
def fuse_grad_size_in_MB(self):
"""
Specifying the size of gradient to fuse in Mega-Bytes
Default value: 32
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fuse_grad_size_in_MB = 50
"""
return self.strategy.fuse_grad_size_in_MB
@fuse_grad_size_in_MB.setter
@is_strict_auto
def fuse_grad_size_in_MB(self, value):
if isinstance(value, int):
self.strategy.fuse_grad_size_in_MB = value
else:
print("WARNING: fuse_grad_size_in_MB should have value of int type")
@property
def last_comm_group_size_MB(self):
"""
Specifying the size of gradient to fuse in Mega-Bytes when
the last group of each batch communicates. Making the last group
small is useful to improve performance.
Default value: 1
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.last_comm_group_size_MB = 2
"""
return self.strategy.last_comm_group_size_MB
@last_comm_group_size_MB.setter
@is_strict_auto
def last_comm_group_size_MB(self, value):
if value > 0:
self.strategy.last_comm_group_size_MB = value
else:
raise ValueError("last_comm_group_size_MB should be greater than 0")
@property
def find_unused_parameters(self):
"""
Indicating whether we are using find_unused_parameters to
find unused parameters in DataParallel.
Default value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.find_unused_parameters = True
"""
return self.strategy.find_unused_parameters
@find_unused_parameters.setter
@is_strict_auto
def find_unused_parameters(self, flag):
if isinstance(flag, bool):
self.strategy.find_unused_parameters = flag
else:
print(
"WARNING: find_unused_parameters should have value of bool type")
@property
def _fuse_grad_size_in_TFLOPS(self):
return self.strategy.fuse_grad_size_in_TFLOPS
@_fuse_grad_size_in_TFLOPS.setter
@is_strict_auto
def _fuse_grad_size_in_TFLOPS(self, value):
if isinstance(value, float):
self.strategy.fuse_grad_size_in_TFLOPS = value
else:
print(
"WARNING: fuse_grad_size_in_TFLOPS should have value of float type"
)
@property
def nccl_comm_num(self):
"""
Specifying the number of NCCL communicator
Default value: 1
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.nccl_comm_num = 2
"""
return self.strategy.nccl_comm_num
@nccl_comm_num.setter
@is_strict_auto
def nccl_comm_num(self, value):
if isinstance(value, int):
self.strategy.nccl_comm_num = value
else:
print("WARNING: nccl_comm_num should have value of int type")
@recompute.setter
@is_strict_auto
def recompute(self, flag):
if isinstance(flag, bool):
self.strategy.recompute = flag
else:
print("WARNING: recompute should have value of bool type")
@property
def recompute_configs(self):
"""
Set recompute configurations.
**Note**:
            checkpoints(list): list of string names of checkpoints. In general, the recompute
            strategy of the current implementation requires some manually assigned checkpoints.
            enable_offload(bool): enable the recompute checkpoints offload feature. This feature
            offloads the checkpoints to host memory to allow an even larger batch size. Since
            the memcpy from host to device takes time, it is a trade-off between a larger batch
            size and training speed.
            checkpoint_shape(list): list of ints that specifies the shape of the checkpoints. So far
            recompute-offload requires all checkpoints to have the same shape, and every dimension
            specified here should be determined ("-1" is not allowed).
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.recompute = True
strategy.recompute_configs = {
"checkpoints": ["x", "y"],
"enable_offload": True,
"checkpoint_shape": [100, 512, 1024] }
"""
return get_msg_dict(self.strategy.recompute_configs)
@recompute_configs.setter
@is_strict_auto
def recompute_configs(self, configs):
check_configs_key(self.strategy.recompute_configs, configs,
"checkpoint_configs")
assign_configs_value(self.strategy.recompute_configs, configs)
@property
def sharding(self):
"""
Indicating whether we are using sharding Optimizer for memory
optimization. We implement the sharding optimizer following the ZeRO-DP
idea from [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054).
Model parameters and Optimizer State are sharded into different ranks allowing to fit larger model.
In Hybrid parallelism scenario, we use sharding config as uniform API to set each parallelism.
Default value: False
Examples:
.. code-block:: python
import paddle.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sharding = True
"""
return self.strategy.sharding
@sharding.setter
@is_strict_auto
def sharding(self, flag):
if isinstance(flag, bool):
self.strategy.sharding = flag
else:
print("WARNING: sharding should have value of bool type")
@property
def sharding_configs(self):
"""
Set sharding configurations.
**Note**:
            sharding_segment_strategy(string, optional): strategy used to segment the program (forward & backward operations). Two strategies are
            available: "segment_broadcast_MB" and "segment_anchors". Segment is a concept used in sharding to overlap computation and
            communication. Default is segment_broadcast_MB.
            segment_broadcast_MB(float, optional): segment by the parameter broadcast volume. Sharding will introduce parameter broadcast operations into the program, and
            after every segment_broadcast_MB size of parameters has been broadcast, the program will be cut into one segment.
            This configuration will affect the communication speed in sharding training, and should be an empirical value decided by your model size and network topology.
            Only enabled when sharding_segment_strategy = segment_broadcast_MB. Default is 32.0 .
            segment_anchors(list): list of anchors used to segment the program, which allows a finer control of program segmentation.
            This strategy is experimental for now. Only enabled when sharding_segment_strategy = segment_anchors.
            sharding_degree(int, optional): specifies the number of gpus within each sharding parallelism group; sharding will be turned off if sharding_degree=1. Default is 8.
            gradient_merge_acc_step(int, optional): specifies the accumulation steps in gradient merge; gradient merge will be turned off if gradient_merge_acc_step=1. Default is 1.
            optimize_offload(bool, optional): enable the optimizer offload, which offloads the moment vars to Host memory in order to save GPU memory for fitting a larger model.
            The moment vars will be prefetched from and offloaded to Host memory during the update stage. It is a strategy that trades off between training speed and GPU memory, and is recommended to be turned on only when gradient_merge_acc_step is large, where
            the number of update stages is relatively small compared with forward & backward's. Default is False.
            dp_degree(int, optional): specifies the number of data parallelism groups; when dp_degree >= 2, it will introduce dp_degree-way data parallelism as the outer parallelism for the inner parallelism. The user is responsible for ensuring global_world_size = mp_degree * sharding_degree * pp_degree * dp_degree. Default is 1.
            mp_degree(int, optional): [Hybrid parallelism ONLY] specifies the number of gpus within each megatron parallelism group; megatron parallelism will be turned off if mp_degree=1. Default is 1.
            pp_degree(int, optional): [Hybrid parallelism ONLY] specifies the number of gpus within each pipeline parallelism group; pipeline parallelism will be turned off if pp_degree=1. Default is 1.
            pp_allreduce_in_optimize(bool, optional): [Hybrid parallelism ONLY] move the allreduce operations from the backward stage to the update (optimize) stage when pipeline parallelism is on.
            This configuration will affect the communication speed of Hybrid parallelism training depending on the network topology. This strategy is experimental for now. Default is False.
Examples:
.. code-block:: python
# sharding-DP, 2 nodes with 8 gpus per node
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sharding = True
strategy.sharding_configs = {
"sharding_segment_strategy": "segment_broadcast_MB",
"segment_broadcast_MB": 32,
"sharding_degree": 8,
"dp_degree": 2,
"gradient_merge_acc_step": 4,
}
"""
return get_msg_dict(self.strategy.sharding_configs)
@sharding_configs.setter
@is_strict_auto
def sharding_configs(self, configs):
check_configs_key(self.strategy.sharding_configs, configs,
"sharding_configs")
assign_configs_value(self.strategy.sharding_configs, configs)
@property
def without_graph_optimization(self):
"""
        Run the program using Executor rather than ParallelExecutor.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.without_graph_optimization = True
"""
return self.strategy.without_graph_optimization
@without_graph_optimization.setter
@is_strict_auto
def without_graph_optimization(self, flag):
if isinstance(flag, bool):
self.strategy.without_graph_optimization = flag
else:
print(
"WARNING: without_graph_optimization should have value of bool type"
)
@property
def _calc_comm_same_stream(self):
"""
        This is based on the raw_program_optimizer program.
        Set whether to use the same stream for calc and comm when fusing allreduce.
        The default value of calc_comm_same_stream is False.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.calc_comm_same_stream = True
"""
return self.strategy.calc_comm_same_stream
@_calc_comm_same_stream.setter
@is_strict_auto
def _calc_comm_same_stream(self, same):
if isinstance(same, bool):
self.strategy.calc_comm_same_stream = same
else:
print(
"WARNING: calc_comm_same_stream should have value of boolean type"
)
@property
def fuse_grad_size_in_num(self):
"""
        This is based on the raw_program_optimizer program and sets the number of gradients fused into each allreduce op.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fuse_grad_size_in_num = 2
"""
return self.strategy.fuse_grad_size_in_num
@fuse_grad_size_in_num.setter
@is_strict_auto
def fuse_grad_size_in_num(self, num):
if isinstance(num, int):
self.strategy.fuse_grad_size_in_num = num
else:
print(
"WARNING: fuse_grad_size_in_num should have value of int32 type")
@property
def pipeline(self):
"""
Indicating whether we are using pipeline parallelism for distributed training.
        Current implementation mainly focuses on single GPU machine pipeline parallelism and
        data parallelism across GPU machines. The pipeline information is indicated through
        device_guard information in the user-defined program.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.pipeline = True
"""
return self.strategy.pipeline
@pipeline.setter
@is_strict_auto
def pipeline(self, flag):
if isinstance(flag, bool):
self.strategy.pipeline = flag
else:
print("WARNING: pipeline should have value of bool type")
@property
def pipeline_configs(self):
"""
        Set pipeline parallelism configurations. In pipeline parallelism,
        different parts of the neural network run on different GPUs.
        There are Tensor queue buffers between each pair of neighboring GPUs
        that are responsible for synchronizing hidden Tensor results between
        GPUs. Pipeline parallelism consists of several producer-consumer style
        hardware pairs, such as GPU-GPU, CPU-GPU, GPU-XPU. The best way to speed up
        pipeline parallelism is to make the size of the Tensors in the Tensor queue smaller,
        so that we will have a faster producer for downstream consumers.
**Notes**:
**Detailed arguments for pipeline_configs**
**micro_batch_size**: the number of small batches in each user defined batch
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.pipeline = True
strategy.pipeline_configs = {"micro_batch_size": 12}
"""
return get_msg_dict(self.strategy.pipeline_configs)
@pipeline_configs.setter
@is_strict_auto
def pipeline_configs(self, configs):
check_configs_key(self.strategy.pipeline_configs, configs,
"pipeline_configs")
assign_configs_value(self.strategy.pipeline_configs, configs)
@property
def tensor_parallel(self):
"""
Indicating whether we are using tensor parallel for distributed training.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.tensor_parallel = True
"""
return self.strategy.tensor_parallel
@tensor_parallel.setter
@is_strict_auto
def tensor_parallel(self, flag):
if isinstance(flag, bool):
self.strategy.tensor_parallel = flag
else:
print("WARNING: tensor_parallel should have value of bool type")
@property
def tensor_parallel_configs(self):
"""
Set tensor_parallel configurations.
**Notes**:
**Detailed arguments for tensor_parallel_configs**
**tensor_parallel_degree**: degree of tensor parallel
**tensor_init_seed**: parameter initialization random seed
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.tensor_parallel = True
strategy.tensor_parallel_configs = {"tensor_parallel_degree": 4,
"tensor_init_seed": 123}
"""
return get_msg_dict(self.strategy.tensor_parallel_configs)
@tensor_parallel_configs.setter
@is_strict_auto
def tensor_parallel_configs(self, configs):
check_configs_key(self.strategy.tensor_parallel_configs, configs,
"tensor_parallel_configs")
assign_configs_value(self.strategy.tensor_parallel_configs, configs)
@property
def hybrid_configs(self):
"""
Dynamic graph hybrid parallel strategy configuration. Three-way hybrid parallelism
        needs to meet the following relationship:
total_number_GPUs = dp_degree * mp_degree * pp_degree
**Note**:
dp_degree(int): set number of GPUs in a data parallel group. Default -1.
This value should be an integer greater than 0.
If it is not set, or set to -1, its value will be inferred
based on the total number of cards.
mp_degree(int): set number of GPUs in a model parallel group. Default 1
pp_degree(int): set number of GPUs in a pipeline parallel group. Default 1
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": 2,
"pp_degree": 1}
"""
return get_msg_dict(self.strategy.hybrid_configs)
@hybrid_configs.setter
def hybrid_configs(self, configs):
check_configs_key(self.strategy.hybrid_configs, configs,
"hybrid_configs")
assign_configs_value(self.strategy.hybrid_configs, configs)
@property
def localsgd(self):
"""
Indicating whether we are using Local SGD training. Default Value: False
For more details, please refer to
`Don't Use Large Mini-Batches, Use Local SGD <https://arxiv.org/pdf/1808.07217.pdf>`_.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.localsgd = True # by default this is false
"""
return self.strategy.localsgd
@localsgd.setter
@is_strict_auto
def localsgd(self, flag):
if isinstance(flag, bool):
self.strategy.localsgd = flag
else:
print("WARNING: localsgd should have value of bool type")
@property
def localsgd_configs(self):
"""
Set LocalSGD training configurations. LocalSGD has a configurable
setting that can be configured through a dict.
**Notes**:
k_steps(int) The local steps for training before parameter synchronization. Default 1.
            begin_step(int) The step of beginning training by localsgd. Default 1.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.localsgd = True
strategy.localsgd_configs = {"k_steps": 4,
"begin_step": 30}
"""
return get_msg_dict(self.strategy.localsgd_configs)
@localsgd_configs.setter
@is_strict_auto
def localsgd_configs(self, configs):
check_configs_key(self.strategy.localsgd_configs, configs,
"localsgd_configs")
assign_configs_value(self.strategy.localsgd_configs, configs)
@property
def adaptive_localsgd(self):
"""
Indicating whether we are using Adaptive Local SGD training. Default Value: False
For more details, please refer to `Adaptive Communication Strategies to Achieve
the Best Error-Runtime Trade-off in Local-Update SGD <https://arxiv.org/pdf/1810.08313.pdf>`_.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.adaptive_localsgd = True # by default this is false
"""
return self.strategy.adaptive_localsgd
@adaptive_localsgd.setter
@is_strict_auto
def adaptive_localsgd(self, flag):
if isinstance(flag, bool):
self.strategy.adaptive_localsgd = flag
else:
print("WARNING: adaptive_localsgd should have value of bool type")
@property
def adaptive_localsgd_configs(self):
"""
Set AdaptiveLocalSGD training configurations. AdaptiveLocalSGD has a configurable
setting that can be configured through a dict.
**Notes**:
init_k_steps(int) The initial steps for training before adaptive localsgd.
Then, the adaptive localsgd method will modify init_k_steps automatically.
Default 1.
            begin_step(int) The step of beginning training by adaptive localsgd. Default 1.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.adaptive_localsgd = True
strategy.adaptive_localsgd_configs = {"init_k_steps": 1,
"begin_step": 30}
"""
return get_msg_dict(self.strategy.adaptive_localsgd_configs)
@adaptive_localsgd_configs.setter
@is_strict_auto
def adaptive_localsgd_configs(self, configs):
check_configs_key(self.strategy.adaptive_localsgd_configs, configs,
"adaptive_localsgd_configs")
assign_configs_value(self.strategy.adaptive_localsgd_configs, configs)
@property
def dgc(self):
"""
Indicating whether we are using Deep Gradient Compression training. For more details, please refer to
[Deep Gradient Compression](https://arxiv.org/abs/1712.01887).
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True # by default this is false
"""
return self.strategy.dgc
@dgc.setter
@is_strict_auto
def dgc(self, flag):
if isinstance(flag, bool):
self.strategy.dgc = flag
else:
print("WARNING: dgc should have value of bool type")
@property
def dgc_configs(self):
r"""
        Set Deep Gradient Compression training configurations. In general, dgc has several configurable
settings that can be configured through a dict.
**Notes**:
rampup_begin_step(int): The beginning step from which gradient compression is implemented. Default 0.
rampup_step(int): Time steps used in sparsity warm-up periods. Default is 1. \
For example, if the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 100, \
it will use 0.75 at 0~19 steps, and 0.9375 at 20~39 steps, and so on. And when reach sparsity array \
ends, it will use 0.999 then and after.
sparsity(list[float]): Get top important element from gradient tensor, the ratio is (1 - sparsity). \
Default is [0.999]. For example, if the sparsity is [0.99, 0.999], the top [1%, 0.1%] important \
element will be transmitted.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True
strategy.dgc_configs = {"rampup_begin_step": 1252}
"""
return get_msg_dict(self.strategy.dgc_configs)
@dgc_configs.setter
@is_strict_auto
def dgc_configs(self, configs):
check_configs_key(self.strategy.dgc_configs, configs, "dgc_configs")
assign_configs_value(self.strategy.dgc_configs, configs)
@property
def fp16_allreduce(self):
"""
Indicating whether we are using fp16 gradient allreduce training
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fp16_allreduce = True # by default this is false
"""
return self.strategy.fp16_allreduce
@fp16_allreduce.setter
@is_strict_auto
def fp16_allreduce(self, flag):
if not isinstance(flag, bool):
raise TypeError('fp16_allreduce must be value of bool type')
self.strategy.fp16_allreduce = flag
@property
def gradient_merge(self):
"""
        Gradient Merge, also called Gradient Accumulation,
        is a strategy for large batch training. With this strategy,
        model parameters will not be updated until a user-defined number of steps.
        For each step, the forward network and the backward network
        will run to calculate the gradients of the model parameters.
        For every k steps, the optimization network will run,
        applying a specific optimization method (such as SGD, Adam)
        to the model parameters.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
"""
return self.strategy.gradient_merge
@gradient_merge.setter
@is_strict_auto
def gradient_merge(self, flag):
if isinstance(flag, bool):
self.strategy.gradient_merge = flag
else:
print("WARNING: gradient_merge should have value of bool type")
@property
def gradient_merge_configs(self):
"""
the key-value configs of distribute_strategy
**Note**:
k_steps(int): the update period of the parameters.
avg(bool): whether to average the gradients of each mini-batch, the default value is `True`
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
"""
return get_msg_dict(self.strategy.gradient_merge_configs)
@gradient_merge_configs.setter
@is_strict_auto
def gradient_merge_configs(self, configs):
check_configs_key(self.strategy.gradient_merge_configs, configs,
"gradient_configs")
assign_configs_value(self.strategy.gradient_merge_configs, configs)
@property
def lars(self):
"""
Set lars configurations. lars is used to deal with the convergence problems when the global
batch size is larger than 8k. For more details, please refer to
[Large Batch Training of Convolutional Networks](https://arxiv.org/abs/1708.03888).
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lars = True # by default this is false
"""
return self.strategy.lars
@lars.setter
@is_strict_auto
def lars(self, flag):
if isinstance(flag, bool):
self.strategy.lars = flag
else:
print("WARNING: lars should have value of bool type")
@property
def lars_configs(self):
"""
Set Lars training configurations.
**Notes**:
**lars_coeff (float)**: trust ratio in lars formula.
**lars_weight_decay** (float): weight decay coefficient in lars formula.
            **epsilon (float)**: argument used to avoid a potential division-by-zero
            when computing the local lr;
            **exclude_from_weight_decay ([string])**: a list of name strings of layers which
            will be excluded from weight decay in the lars formula.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lars = True
strategy.lars_configs = {
"lars_coeff": 0.01,
"lars_weight_decay": 0.0005,
"epsilon": 0,
"exclude_from_weight_decay": ['batch_norm', '.b_0']
}
"""
return get_msg_dict(self.strategy.lars_configs)
@lars_configs.setter
@is_strict_auto
def lars_configs(self, configs):
check_configs_key(self.strategy.lars_configs, configs, "lars_configs")
assign_configs_value(self.strategy.lars_configs, configs)
@property
def lamb(self):
"""
Set lamb configurations. lamb is used to deal with the convergence problems for large
        batch size training, especially for attention-related models like BERT. For more details,
please refer to
[Large Batch Optimization for Deep Learning: Training BERT in 76 minutes](https://arxiv.org/abs/1904.00962).
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lamb = True # by default this is false
"""
return self.strategy.lamb
@lamb.setter
@is_strict_auto
def lamb(self, flag):
if isinstance(flag, bool):
self.strategy.lamb = flag
else:
print("WARNING: lamb should have value of bool type")
@property
def lamb_configs(self):
"""
        Set Lamb training configurations.
**Notes**:
**lamb_weight_decay** (float): weight decay coefficient in lamb formula.
            **exclude_from_weight_decay ([string])**: a list of name strings of layers which
            will be excluded from weight decay in the lamb formula.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': [],
}
"""
return get_msg_dict(self.strategy.lamb_configs)
@lamb_configs.setter
@is_strict_auto
def lamb_configs(self, configs):
check_configs_key(self.strategy.lamb_configs, configs, "lamb_configs")
assign_configs_value(self.strategy.lamb_configs, configs)
@property
def elastic(self):
"""
Indicating whether we want to do current distributed training on clusters with elastic resources.
        Currently, this configuration is not valid.
"""
return self.strategy.elastic
@elastic.setter
@is_strict_auto
def elastic(self, flag):
if isinstance(flag, bool):
self.strategy.elastic = flag
else:
print("WARNING: elastic should have value of bool type")
@property
def auto(self):
"""
Indicating whether we are using auto-parallel configuration
        This is currently an experimental feature. Auto-parallelism can be used
        only when a user does not set any other strategy configs except auto.
        For details, please refer to the following code example.
Default Value: False
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.auto = True
# if set other strategy at the same time, auto will not apply
# strategy.amp = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.auto
@auto.setter
def auto(self, flag):
if isinstance(flag, bool):
self.strategy.auto = flag
else:
print("WARNING: auto should have value of bool type")
@property
def cudnn_exhaustive_search(self):
"""
        Indicating whether to use an exhaustive search method to choose convolution algorithms.
        Exhaustive search attempts all cuDNN algorithms to choose the fastest algorithm.
        This method is time-consuming; the chosen algorithm will be cached for the given layer specifications.
Once the layer specifications (like batch size, feature map size) are changed, it will search again.
Default Value: True
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.cudnn_exhaustive_search = False
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.cudnn_exhaustive_search
@cudnn_exhaustive_search.setter
@is_strict_auto
def cudnn_exhaustive_search(self, flag):
if isinstance(flag, bool):
self.strategy.cudnn_exhaustive_search = flag
else:
print(
"WARNING: cudnn_exhaustive_search should have value of bool type"
)
@property
def conv_workspace_size_limit(self):
"""
        The workspace limit size in MB units for choosing cuDNN convolution algorithms.
        The inner function of cuDNN obtains the fastest suitable algorithm that fits within this memory limit.
        Usually, a larger workspace size may lead to faster algorithms being chosen,
        but it significantly increases the memory workspace. Users need to trade off between memory and speed.
Default Value: 4000
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.conv_workspace_size_limit = 1024
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.conv_workspace_size_limit
@conv_workspace_size_limit.setter
@is_strict_auto
def conv_workspace_size_limit(self, value):
if isinstance(value, int):
self.strategy.conv_workspace_size_limit = value
else:
print(
"WARNING: conv_workspace_size_limit should have value of int type"
)
@property
def cudnn_batchnorm_spatial_persistent(self):
"""
        Indicates whether to use the CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode in batchnorm.
        This is only useful with cuDNN.
Default Value: True
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.cudnn_batchnorm_spatial_persistent = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.cudnn_batchnorm_spatial_persistent
@cudnn_batchnorm_spatial_persistent.setter
@is_strict_auto
def cudnn_batchnorm_spatial_persistent(self, flag):
if isinstance(flag, bool):
self.strategy.cudnn_batchnorm_spatial_persistent = flag
else:
print(
"WARNING: cudnn_batchnorm_spatial_persistent should have value of bool type"
)
def _enable_env(self):
strategy = self.strategy
keys = [
"FLAGS_cudnn_batchnorm_spatial_persistent",
"FLAGS_conv_workspace_size_limit",
"FLAGS_cudnn_exhaustive_search",
"FLAGS_sync_nccl_allreduce",
"FLAGS_fuse_parameter_memory_size",
"FLAGS_fuse_parameter_groups_size",
]
values = [
bool(strategy.cudnn_batchnorm_spatial_persistent),
int(strategy.conv_workspace_size_limit),
bool(strategy.cudnn_exhaustive_search),
bool(strategy.sync_nccl_allreduce),
int(strategy.fuse_grad_size_in_MB),
int(strategy.fuse_grad_size_in_TFLOPS),
]
for i, key in enumerate(keys):
if _global_flags().is_public(key):
_global_flags()[key] = values[i]
def _is_strict_auto(self):
global non_auto_func_called
if self.strategy.auto and non_auto_func_called:
return True
return False
def __repr__(self):
spacing = 2
max_k = 38
max_v = 38
length = max_k + max_v + spacing
h1_format = " " + "|{{:^{}s}}|\n".format(length)
h2_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format(max_k, " " *
spacing, max_v)
border = " +" + "".join(["="] * length) + "+"
line = " +" + "".join(["-"] * length) + "+"
draws = border + "\n"
draws += h1_format.format("")
draws += h1_format.format("DistributedStrategy Overview")
draws += h1_format.format("")
fields = self.strategy.DESCRIPTOR.fields
str_res = ""
env_draws = line + "\n"
for f in fields:
if "build_strategy" in f.name or "execution_strategy" in f.name:
continue
if "_configs" in f.name:
continue
else:
if isinstance(getattr(self.strategy, f.name), bool):
if hasattr(self.strategy, f.name + "_configs"):
if getattr(self.strategy, f.name):
draws += border + "\n"
draws += h1_format.format(
"{}=True <-> {}_configs".format(f.name, f.name))
draws += line + "\n"
my_configs = getattr(self.strategy,
f.name + "_configs")
config_fields = my_configs.DESCRIPTOR.fields
for ff in config_fields:
if isinstance(
getattr(my_configs, ff.name),
google.protobuf.pyext._message.
RepeatedScalarContainer):
values = getattr(my_configs, ff.name)
for i, v in enumerate(values):
if i == 0:
draws += h2_format.format(ff.name,
str(v))
else:
draws += h2_format.format("",
str(v))
else:
draws += h2_format.format(
ff.name,
str(getattr(my_configs, ff.name)))
else:
env_draws += h2_format.format(
f.name, str(getattr(self.strategy, f.name)))
else:
env_draws += h2_format.format(
f.name, str(getattr(self.strategy, f.name)))
result_res = draws + border + "\n" + h1_format.format(
"Environment Flags, Communication Flags")
result_res += env_draws
build_strategy_str = border + "\n"
build_strategy_str += h1_format.format("Build Strategy")
build_strategy_str += line + "\n"
fields = self.strategy.build_strategy.DESCRIPTOR.fields
for f in fields:
build_strategy_str += h2_format.format(
f.name, str(getattr(self.strategy.build_strategy, f.name)))
build_strategy_str += border + "\n"
execution_strategy_str = h1_format.format("Execution Strategy")
execution_strategy_str += line + "\n"
fields = self.strategy.execution_strategy.DESCRIPTOR.fields
for f in fields:
execution_strategy_str += h2_format.format(
f.name, str(getattr(self.strategy.execution_strategy, f.name)))
execution_strategy_str += border + "\n"
result_res += build_strategy_str + execution_strategy_str
return result_res
| apache-2.0 | -8,822,285,084,877,831,000 | 34.883058 | 324 | 0.608985 | false |
projecthamster/hamster-dbus | docs/conf.py | 1 | 8937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hamster-dbus documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import datetime
import shlex
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import hamster_dbus
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {
'gobject': ('https://lazka.github.io/pgi-docs/GObject-2.0/', None),
'python': ('https://docs.python.org/3', None),
'hamster-lib': ('http://hamster-lib.docs.projecthamster.org/en/latest/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hamster-dbus'
copyright = u'2016-2017, Eric Goller'
author = u'Eric Goller'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = hamster_dbus.__version__
# The full version, including alpha/beta/rc tags.
release = hamster_dbus.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hamster-dbusdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'hamster-dbus.tex',
u'hamster-dbus Documentation',
u'Eric Goller', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hamster-dbus',
u'hamster-dbus Documentation',
[u'Eric Goller'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hamster-dbus',
u'hamster-dbus Documentation',
u'Eric Goller',
'hamster-dbus',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | -4,234,435,585,753,774,000 | 29.397959 | 83 | 0.70113 | false |
israeltobias/DownMedia | youtube-dl/youtube_dl/extractor/amp.py | 27 | 3480 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
mimetype2ext,
determine_ext,
)
class AMPIE(InfoExtractor):
# parse Akamai Adaptive Media Player feed
def _extract_feed_info(self, url):
item = self._download_json(
url, None, 'Downloading Akamai AMP feed',
'Unable to download Akamai AMP feed')['channel']['item']
video_id = item['guid']
def get_media_node(name, default=None):
media_name = 'media-%s' % name
media_group = item.get('media-group') or item
return media_group.get(media_name) or item.get(media_name) or item.get(name, default)
thumbnails = []
media_thumbnail = get_media_node('thumbnail')
if media_thumbnail:
if isinstance(media_thumbnail, dict):
media_thumbnail = [media_thumbnail]
for thumbnail_data in media_thumbnail:
thumbnail = thumbnail_data['@attributes']
thumbnails.append({
'url': self._proto_relative_url(thumbnail['url'], 'http:'),
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
subtitles = {}
media_subtitle = get_media_node('subTitle')
if media_subtitle:
if isinstance(media_subtitle, dict):
media_subtitle = [media_subtitle]
for subtitle_data in media_subtitle:
subtitle = subtitle_data['@attributes']
lang = subtitle.get('lang') or 'en'
subtitles[lang] = [{'url': subtitle['href']}]
formats = []
media_content = get_media_node('content')
if isinstance(media_content, dict):
media_content = [media_content]
for media_data in media_content:
media = media_data.get('@attributes', {})
media_url = media.get('url')
if not media_url:
continue
ext = mimetype2ext(media.get('type')) or determine_ext(media_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
media_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
formats.append({
'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
'url': media['url'],
'tbr': int_or_none(media.get('bitrate')),
'filesize': int_or_none(media.get('fileSize')),
'ext': ext,
})
self._sort_formats(formats)
timestamp = parse_iso8601(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))
return {
'id': video_id,
'title': get_media_node('title'),
'description': get_media_node('description'),
'thumbnails': thumbnails,
'timestamp': timestamp,
'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
'subtitles': subtitles,
'formats': formats,
}
| gpl-3.0 | -4,717,861,870,326,563,000 | 38.101124 | 106 | 0.527586 | false |
rapidhere/rpbtman_autosign | pytz/zoneinfo/Africa/Casablanca.py | 9 | 1184 | '''tzinfo timezone information for Africa/Casablanca.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Casablanca(DstTzInfo):
'''Africa/Casablanca timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Casablanca'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1913,10,26,0,30,20),
d(1939,9,12,0,0,0),
d(1939,11,18,23,0,0),
d(1940,2,25,0,0,0),
d(1945,11,17,23,0,0),
d(1950,6,11,0,0,0),
d(1950,10,28,23,0,0),
d(1967,6,3,12,0,0),
d(1967,9,30,23,0,0),
d(1974,6,24,0,0,0),
d(1974,8,31,23,0,0),
d(1976,5,1,0,0,0),
d(1976,7,31,23,0,0),
d(1977,5,1,0,0,0),
d(1977,9,27,23,0,0),
d(1978,6,1,0,0,0),
d(1978,8,3,23,0,0),
d(1984,3,16,0,0,0),
d(1985,12,31,23,0,0),
]
_transition_info = [
i(-1800,0,'LMT'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,0,'CET'),
i(0,0,'WET'),
]
Casablanca = Casablanca()
| gpl-3.0 | 1,986,346,365,785,845,500 | 19.413793 | 80 | 0.606419 | false |
inaz2/deluge-hack | deluge/httpdownloader.py | 1 | 8499 | #
# httpdownloader.py
#
# Copyright (C) 2009 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from twisted.web import client, http
from twisted.web.error import PageRedirect
from twisted.python.failure import Failure
from twisted.internet import reactor
from deluge.log import setupLogger, LOG as log
from common import get_version
import os.path
import zlib
class HTTPDownloader(client.HTTPDownloader):
"""
Factory class for downloading files and keeping track of progress.
"""
def __init__(self, url, filename, part_callback=None, headers=None, force_filename=False, allow_compression=True):
"""
:param url: the url to download from
:type url: string
:param filename: the filename to save the file as
:type filename: string
:param force_filename: forces use of the supplied filename, regardless of header content
:type force_filename: bool
:param part_callback: a function to be called when a part of data
            is received, its signature should be: func(data, current_length, total_length)
:type part_callback: function
:param headers: any optional headers to send
:type headers: dictionary
"""
self.part_callback = part_callback
self.current_length = 0
self.decoder = None
self.value = filename
self.force_filename = force_filename
self.allow_compression = allow_compression
agent = "Deluge/%s (http://deluge-torrent.org)" % get_version()
client.HTTPDownloader.__init__(self, url, filename, headers=headers, agent=agent)
def gotStatus(self, version, status, message):
self.code = int(status)
client.HTTPDownloader.gotStatus(self, version, status, message)
def gotHeaders(self, headers):
if self.code == http.OK:
if "content-length" in headers:
self.total_length = int(headers["content-length"][0])
else:
self.total_length = 0
if self.allow_compression and "content-encoding" in headers and \
headers["content-encoding"][0] in ("gzip", "x-gzip", "deflate"):
# Adding 32 to the wbits enables gzip & zlib decoding (with automatic header detection)
# Adding 16 just enables gzip decoding (no zlib)
self.decoder = zlib.decompressobj(zlib.MAX_WBITS + 32)
if "content-disposition" in headers and not self.force_filename:
new_file_name = str(headers["content-disposition"][0]).split(";")[1].split("=")[1]
new_file_name = sanitise_filename(new_file_name)
new_file_name = os.path.join(os.path.split(self.fileName)[0], new_file_name)
count = 1
fileroot = os.path.splitext(new_file_name)[0]
fileext = os.path.splitext(new_file_name)[1]
while os.path.isfile(new_file_name):
# Increment filename if already exists
new_file_name = "%s-%s%s" % (fileroot, count, fileext)
count += 1
self.fileName = new_file_name
self.value = new_file_name
elif self.code in (http.MOVED_PERMANENTLY, http.FOUND, http.SEE_OTHER, http.TEMPORARY_REDIRECT):
location = headers["location"][0]
error = PageRedirect(self.code, location=location)
self.noPage(Failure(error))
return client.HTTPDownloader.gotHeaders(self, headers)
def pagePart(self, data):
if self.code == http.OK:
self.current_length += len(data)
if self.decoder:
data = self.decoder.decompress(data)
if self.part_callback:
self.part_callback(data, self.current_length, self.total_length)
return client.HTTPDownloader.pagePart(self, data)
def pageEnd(self):
if self.decoder:
data = self.decoder.flush()
self.current_length -= len(data)
self.decoder = None
self.pagePart(data)
return client.HTTPDownloader.pageEnd(self)
def sanitise_filename(filename):
"""
Sanitises a filename to use as a download destination file.
Logs any filenames that could be considered malicious.
:param filename: the filename to sanitise
:type filename: string
:returns: the sanitised filename
:rtype: string
"""
# Remove any quotes
filename = filename.strip("'\"")
if os.path.basename(filename) != filename:
# Dodgy server, log it
log.warning("Potentially malicious server: trying to write to file '%s'" % filename)
# Only use the basename
filename = os.path.basename(filename)
filename = filename.strip()
if filename.startswith(".") or ";" in filename or "|" in filename:
# Dodgy server, log it
log.warning("Potentially malicious server: trying to write to file '%s'" % filename)
return filename
def download_file(url, filename, callback=None, headers=None, force_filename=False, allow_compression=True):
"""
Downloads a file from a specific URL and returns a Deferred. You can also
specify a callback function to be called as parts are received.
:param url: the url to download from
:type url: string
:param filename: the filename to save the file as
:type filename: string
:param callback: a function to be called when a part of data is received,
        its signature should be: func(data, current_length, total_length)
:type callback: function
:param headers: any optional headers to send
:type headers: dictionary
:param force_filename: force us to use the filename specified rather than
one the server may suggest
:type force_filename: boolean
:param allow_compression: allows gzip & deflate decoding
:type allow_compression: boolean
:returns: the filename of the downloaded file
:rtype: Deferred
:raises t.w.e.PageRedirect: when server responds with a temporary redirect
or permanently moved.
:raises t.w.e.Error: for all other HTTP response errors (besides OK)
"""
url = str(url)
filename = str(filename)
if headers:
for key, value in headers.items():
headers[str(key)] = str(value)
if allow_compression:
if not headers:
headers = {}
headers["accept-encoding"] = "deflate, gzip, x-gzip"
# In twisted 13.1.0 the _parse() function was replaced by the _URI class
if hasattr(client, '_parse'):
scheme, host, port, path = client._parse(url)
else:
        from twisted.web.client import _URI
uri = _URI.fromBytes(url)
scheme = uri.scheme
host = uri.host
port = uri.port
        path = uri.path
factory = HTTPDownloader(url, filename, callback, headers, force_filename, allow_compression)
if scheme == "https":
from twisted.internet import ssl
reactor.connectSSL(host, port, factory, ssl.ClientContextFactory())
else:
reactor.connectTCP(host, port, factory)
return factory.deferred
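# --- Illustrative usage sketch (editor's addition, not part of the original Deluge module) ---
# Shows one way a caller might drive download_file(); the URL, target path and
# callback behaviour below are hypothetical, and a running twisted reactor is assumed.
def _example_download():
    def on_part(data, current_length, total_length):
        # total_length may be 0 when the server sends no content-length header
        log.debug("received %d of %d bytes", current_length, total_length)
    def on_done(filename):
        log.info("saved to %s", filename)
    def on_error(failure):
        log.error("download failed: %s", failure)
    d = download_file("http://example.com/example.torrent",
                      "/tmp/example.torrent", callback=on_part)
    d.addCallbacks(on_done, on_error)
    return d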
| gpl-3.0 | 7,980,435,034,071,178,000 | 38.901408 | 118 | 0.650665 | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/cryptography-0.9.3/tests/hazmat/primitives/test_blowfish.py | 9 | 2801 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import os
import pytest
from cryptography.hazmat.backends.interfaces import CipherBackend
from cryptography.hazmat.primitives.ciphers import algorithms, modes
from .utils import generate_encrypt_test
from ...utils import load_nist_vectors
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.Blowfish("\x00" * 56), modes.ECB()
),
skip_message="Does not support Blowfish ECB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestBlowfishModeECB(object):
test_ECB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "Blowfish"),
["bf-ecb.txt"],
lambda key, **kwargs: algorithms.Blowfish(binascii.unhexlify(key)),
lambda **kwargs: modes.ECB(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.Blowfish("\x00" * 56), modes.CBC("\x00" * 8)
),
skip_message="Does not support Blowfish CBC",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestBlowfishModeCBC(object):
test_CBC = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "Blowfish"),
["bf-cbc.txt"],
lambda key, **kwargs: algorithms.Blowfish(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.Blowfish("\x00" * 56), modes.OFB("\x00" * 8)
),
skip_message="Does not support Blowfish OFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestBlowfishModeOFB(object):
test_OFB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "Blowfish"),
["bf-ofb.txt"],
lambda key, **kwargs: algorithms.Blowfish(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.Blowfish("\x00" * 56), modes.CFB("\x00" * 8)
),
skip_message="Does not support Blowfish CFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestBlowfishModeCFB(object):
test_CFB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "Blowfish"),
["bf-cfb.txt"],
lambda key, **kwargs: algorithms.Blowfish(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),
)
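# Editor's sketch (not part of the original test module): a direct Blowfish/CBC
# round trip using the same primitives the generated tests above exercise. The
# key, IV and plaintext are arbitrary; a usable default backend is assumed.
def _example_blowfish_cbc_roundtrip():
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    key = b"\x01" * 16      # Blowfish keys may be 32-448 bits long
    iv = b"\x00" * 8        # Blowfish uses a 64-bit block, so the IV is 8 bytes
    data = b"8byteblk"      # no padding here, so data must be a block multiple
    cipher = Cipher(algorithms.Blowfish(key), modes.CBC(iv),
                    backend=default_backend())
    encryptor = cipher.encryptor()
    ct = encryptor.update(data) + encryptor.finalize()
    decryptor = cipher.decryptor()
    assert decryptor.update(ct) + decryptor.finalize() == data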
| mit | -4,129,838,176,619,132,000 | 32.345238 | 79 | 0.685826 | false |
ashhher3/cvxpy | cvxpy/tests/test_convolution.py | 11 | 4153 | """
Copyright 2013 Steven Diamond, Eric Chu
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy import *
import cvxpy.settings as s
from cvxpy.lin_ops.tree_mat import mul, tmul, prune_constants
import cvxpy.problems.iterative as iterative
from cvxpy.utilities import Curvature
from cvxpy.utilities import Sign
from cvxpy.tests.base_test import BaseTest
import numpy as np
class TestConvolution(BaseTest):
""" Unit tests for convolution. """
def test_1D_conv(self):
"""Test 1D convolution.
"""
n = 3
x = Variable(n)
f = [1, 2, 3]
g = [0, 1, 0.5]
f_conv_g = [ 0., 1., 2.5, 4., 1.5]
expr = conv(f, g)
assert expr.is_constant()
self.assertEquals(expr.size, (5, 1))
self.assertItemsAlmostEqual(expr.value, f_conv_g)
expr = conv(f, x)
assert expr.is_affine()
self.assertEquals(expr.size, (5, 1))
# Matrix stuffing.
t = Variable()
prob = Problem(Minimize(norm(expr, 1)),
[x == g])
result = prob.solve()
self.assertAlmostEqual(result, sum(f_conv_g))
self.assertItemsAlmostEqual(expr.value, f_conv_g)
# # Expression trees.
# prob = Problem(Minimize(norm(expr, 1)))
# self.prob_mat_vs_mul_funcs(prob)
# result = prob.solve(solver=SCS, expr_tree=True, verbose=True)
# self.assertAlmostEqual(result, 0, places=1)
def prob_mat_vs_mul_funcs(self, prob):
data, dims = prob.get_problem_data(solver=SCS)
A = data["A"]
objective, constr_map, dims, solver = prob.canonicalize(SCS)
all_ineq = constr_map[s.EQ] + constr_map[s.LEQ]
var_offsets, var_sizes, x_length = prob._get_var_offsets(objective,
all_ineq)
opts = {}
constraints = constr_map[s.EQ] + constr_map[s.LEQ]
constraints = prune_constants(constraints)
Amul, ATmul = iterative.get_mul_funcs(constraints, dims,
var_offsets, var_sizes,
x_length)
vec = np.array(range(1, x_length+1))
# A*vec
result = np.zeros(A.shape[0])
Amul(vec, result)
mul_mat = self.mat_from_func(Amul, A.shape[0], A.shape[1])
self.assertItemsAlmostEqual(A*vec, result)
Amul(vec, result)
self.assertItemsAlmostEqual(2*A*vec, result)
# A.T*vec
vec = np.array(range(A.shape[0]))
result = np.zeros(A.shape[1])
ATmul(vec, result)
self.assertItemsAlmostEqual(A.T*vec, result)
ATmul(vec, result)
self.assertItemsAlmostEqual(2*A.T*vec, result)
def mat_from_func(self, func, rows, cols):
"""Convert a multiplier function to a matrix.
"""
test_vec = np.zeros(cols)
result = np.zeros(rows)
matrix = np.zeros((rows, cols))
for i in range(cols):
test_vec[i] = 1.0
func(test_vec, result)
matrix[:, i] = result
test_vec *= 0
result *= 0
return matrix
def test_conv_prob(self):
"""Test a problem with convolution.
"""
import cvxpy as cvx
import numpy as np
N = 5
y = np.asmatrix(np.random.randn(N, 1))
h = np.asmatrix(np.random.randn(2, 1))
x = cvx.Variable(N)
v = cvx.conv(h, x)
obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(y,v[0:N])))
print(cvx.Problem(obj, []).solve())
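# Editor's sketch (not part of the original test suite): the smallest standalone
# use of conv() mirrored from test_1D_conv above -- the kernel f and the
# constraint value for x reuse the same numbers, so expr.value should come out
# close to [0, 1, 2.5, 4, 1.5].
def _example_conv_usage():
    f = [1, 2, 3]
    x = Variable(3)
    expr = conv(f, x)  # length is len(f) + 3 - 1 == 5
    Problem(Minimize(norm(expr, 1)), [x == [0, 1, 0.5]]).solve()
    return expr.value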
| gpl-3.0 | 5,661,699,444,197,006,000 | 34.194915 | 75 | 0.586564 | false |
jrha/aquilon | tests/broker/test_update_machine.py | 2 | 23821 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'Module for testing the update machine command.'
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateMachine(TestBrokerCommand):
def testupdateut3c1n3(self):
self.noouttest(["update", "machine", "--machine", "ut3c1n3",
"--slot", "10", "--serial", "USN99C5553"])
def testverifyupdateut3c1n3(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Blade: ut3c1n3", command)
self.matchoutput(out, "Chassis: ut3c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 10", command)
self.matchoutput(out, "Vendor: ibm Model: hs21-8853l5u", command)
self.matchoutput(out, "Cpu: xeon_2660 x 2", command)
self.matchoutput(out, "Memory: 8192 MB", command)
self.matchoutput(out, "Serial: USN99C5553", command)
def testverifycatut3c1n3(self):
command = "cat --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out, '"serialnumber" = "USN99C5553";', command)
self.matchoutput(out,
'include { "hardware/machine/ibm/hs21-8853l5u" };',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 8192\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/xeon_2660"\),\s*'
r'create\("hardware/cpu/intel/xeon_2660"\s*\)\s*\);',
command)
def testupdateut3c5n10(self):
self.noouttest(["update", "machine",
"--hostname", "unittest02.one-nyp.ms.com",
"--chassis", "ut3c5.aqd-unittest.ms.com", "--slot", "2"])
def testverifyshowslot(self):
command = "show machine --slot 2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Blade: ut3c5n10", command)
def testverifyshowchassisslot(self):
command = "show machine --chassis ut3c5.aqd-unittest.ms.com --slot 2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Blade: ut3c5n10", command)
def testverifyupdateut3c5n10(self):
command = "show machine --machine ut3c5n10"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Blade: ut3c5n10", command)
self.matchoutput(out, "Chassis: ut3c5.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 2", command)
self.matchoutput(out, "Vendor: ibm Model: hs21-8853l5u", command)
self.matchoutput(out, "Cpu: xeon_2660 x 2", command)
self.matchoutput(out, "Memory: 8192 MB", command)
self.matchoutput(out, "Serial: 99C5553", command)
def testverifycatut3c5n10(self):
command = "cat --machine ut3c5n10"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out, '"serialnumber" = "99C5553";', command)
self.matchoutput(out,
'include { "hardware/machine/ibm/hs21-8853l5u" };',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 8192\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/xeon_2660"\),\s*'
r'create\("hardware/cpu/intel/xeon_2660"\s*\)\s*\);',
command)
def testupdateut3c1n4(self):
self.noouttest(["update", "machine", "--machine", "ut3c1n4",
"--serial", "USNKPDZ407"])
def testupdateut3c1n4cpubadvendor(self):
self.notfoundtest(["update", "machine", "--machine", "ut3c1n4",
"--cpuvendor", "no-such-vendor"])
def testupdateut3c1n4cpubadname(self):
self.notfoundtest(["update", "machine", "--machine", "ut3c1n4",
"--cpuname", "no-such-cpu"])
def testupdateut3c1n4cpureal(self):
self.noouttest(["update", "machine", "--machine", "ut3c1n4",
"--cpuname", "xeon_3000"])
def testupdateut3c1n4rack(self):
# Changing the rack will hit the machine_plenary_will_move logic so we
# can test if the host profile gets written
self.noouttest(["update", "machine", "--machine", "ut3c1n4",
"--rack", "ut4"])
def testverifyupdateut3c1n4(self):
command = "show machine --machine ut3c1n4"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Blade: ut3c1n4", command)
self.matchoutput(out, "Rack: ut4", command)
self.matchoutput(out, "Vendor: ibm Model: hs21-8853l5u", command)
self.matchoutput(out, "Cpu: xeon_3000 x 2", command)
self.matchoutput(out, "Memory: 8192 MB", command)
self.matchoutput(out, "Serial: USNKPDZ407", command)
def testverifycatut3c1n4(self):
command = "cat --machine ut3c1n4"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out, '"serialnumber" = "USNKPDZ407";', command)
self.matchoutput(out,
'include { "hardware/machine/ibm/hs21-8853l5u" };',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 8192\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/xeon_3000"\),\s*'
r'create\("hardware/cpu/intel/xeon_3000"\s*\)\s*\);',
command)
def testverifycatunittest01(self):
# There should be no host template present after the update_machine
# command
command = ["cat", "--hostname", "unittest01.one-nyp.ms.com"]
out = self.internalerrortest(command)
self.matchoutput(out, "No such file or directory", command)
def testclearchassis(self):
command = ["update", "machine", "--machine", "ut9s03p1",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "1"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p1",
"--clearchassis"]
self.noouttest(command)
def testverifyclearchassis(self):
command = ["show", "machine", "--machine", "ut9s03p1"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p1", command)
self.matchclean(out, "Chassis: ", command)
def testclearchassisplusnew(self):
command = ["update", "machine", "--machine", "ut9s03p2",
"--chassis", "ut9c5.aqd-unittest.ms.com", "--slot", "1"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p2",
"--clearchassis",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "2"]
self.noouttest(command)
def testverifyclearchassisplusnew(self):
command = ["show", "machine", "--machine", "ut9s03p2"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p2", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 2", command)
def testtruechassisupdate(self):
command = ["update", "machine", "--machine", "ut9s03p3",
"--chassis", "ut9c5.aqd-unittest.ms.com", "--slot", "2"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p3",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "3"]
self.noouttest(command)
def testverifytruechassisupdate(self):
command = ["show", "machine", "--machine", "ut9s03p3"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p3", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 3", command)
def testsimplechassisupdate(self):
command = ["update", "machine", "--machine", "ut9s03p4",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "4"]
self.noouttest(command)
def testverifysimplechassisupdate(self):
command = ["show", "machine", "--machine", "ut9s03p4"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p4", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 4", command)
def testsimplechassisupdatewithrack(self):
# The rack info is redundant but valid
command = ["update", "machine", "--machine", "ut9s03p5",
"--rack", "ut9",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "5"]
self.noouttest(command)
def testverifysimplechassisupdatewithrack(self):
command = ["show", "machine", "--machine", "ut9s03p5"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p5", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 5", command)
def testtruechassisupdatewithrack(self):
# The rack info is redundant but valid
command = ["update", "machine", "--machine", "ut9s03p6",
"--chassis", "ut9c5.aqd-unittest.ms.com", "--slot", "4"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p6",
"--rack", "ut9",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "6"]
self.noouttest(command)
def testverifytruechassisupdatewithrack(self):
command = ["show", "machine", "--machine", "ut9s03p6"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p6", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 6", command)
def testmissingslot(self):
command = ["update", "machine", "--machine", "ut9s03p7",
"--chassis", "ut9c1.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out, "Option --chassis requires --slot information",
command)
def testverifymissingslot(self):
command = ["show", "machine", "--machine", "ut9s03p7"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p7", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def testmissingchassis(self):
command = ["update", "machine", "--machine", "ut9s03p8",
"--slot", "8"]
out = self.badrequesttest(command)
self.matchoutput(out, "Option --slot requires --chassis information",
command)
def testverifymissingchassis(self):
command = ["show", "machine", "--machine", "ut9s03p8"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p8", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def testdifferentrack(self):
command = ["update", "machine", "--machine", "ut9s03p9",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "9"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p9",
"--rack", "ut8"]
(out, err) = self.successtest(command)
self.matchoutput(err,
"Warning: Host server9.aqd-unittest.ms.com is missing "
"the following required services, please run 'aq "
"reconfigure': afs, aqd, bootserver, dns, lemon, "
"ntp, support-group.",
command)
def testverifydifferentrack(self):
command = ["show", "machine", "--machine", "ut9s03p9"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p9", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def testreuseslot(self):
command = ["update", "machine", "--machine", "ut9s03p10",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "10"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p10",
"--clearchassis"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p10",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "10"]
self.noouttest(command)
def testverifyreuseslot(self):
command = ["show", "machine", "--machine", "ut9s03p10"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p10", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 10", command)
def testtakenslot(self):
command = ["update", "machine", "--machine", "ut9s03p11",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "11"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p12",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "11"]
out = self.badrequesttest(command)
self.matchoutput(out, "Chassis ut9c1.aqd-unittest.ms.com slot 11 "
"already has machine ut9s03p11", command)
def testverifytakenslot(self):
command = ["show", "machine", "--machine", "ut9s03p11"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p11", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 11", command)
command = ["show", "machine", "--machine", "ut9s03p12"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p12", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def testmultislotclear(self):
command = ["update", "machine", "--machine", "ut9s03p13",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "13"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p13",
"--multislot",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "14"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p13",
"--clearchassis"]
self.noouttest(command)
def testverifymultislotclear(self):
command = ["show", "machine", "--machine", "ut9s03p13"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p13", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def testmultislotadd(self):
command = ["update", "machine", "--machine", "ut9s03p15",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "1"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p15",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "2"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p15",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "3"]
self.noouttest(command)
def testverifymultislotadd(self):
command = ["show", "machine", "--machine", "ut9s03p15"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p15", command)
self.matchoutput(out, "Chassis: ut9c2.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 1", command)
self.matchoutput(out, "Slot: 2", command)
self.matchoutput(out, "Slot: 3", command)
def testmultislotupdatefail(self):
command = ["update", "machine", "--machine", "ut9s03p19",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "4"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p19",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "5"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p19",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "6"]
out = self.badrequesttest(command)
self.matchoutput(out, "Use --multislot to support a machine in more "
"than one slot", command)
def testverifymultislotupdatefail(self):
command = ["show", "machine", "--machine", "ut9s03p19"]
out = self.commandtest(command)
self.matchoutput(out, "Blade: ut9s03p19", command)
self.matchoutput(out, "Chassis: ut9c2.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 4", command)
self.matchoutput(out, "Slot: 5", command)
self.matchclean(out, "Slot: 6", command)
def testfailmissingcluster(self):
command = ["update_machine", "--machine=evm1",
"--cluster=cluster-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Cluster cluster-does-not-exist not found.",
command)
def testfailchangemetacluster(self):
command = ["update_machine", "--machine=evm1", "--cluster=utecl13"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Current ESX metacluster utmc1 does not match "
"new ESX metacluster utmc7.",
command)
def testallowchangemetacluster_05(self):
command = ["show_share", "--all"]
out = self.commandtest(command)
# Initially the VM is on utecl1, test_share_1 is not used on utecl2
self.searchoutput(out,
r'Share: test_share_1\s*'
r'Bound to: ESX Cluster utecl1\s*'
r'Server: lnn30f1\s*'
r'Mountpoint: /vol/lnn30f1v1/test_share_1\s*'
r'Disk Count: 1\s*'
r'Machine Count: 1\s*',
command)
self.searchoutput(out,
r'Share: test_share_1\s*'
r'Bound to: ESX Cluster utecl2\s*'
r'Server: lnn30f1\s*'
r'Mountpoint: /vol/lnn30f1v1/test_share_1\s*'
r'Disk Count: 0\s*'
r'Machine Count: 0\s*',
command)
def testallowchangemetacluster_10(self):
command = ["update_machine", "--machine=evm1", "--cluster=utecl13",
"--allow_metacluster_change"]
out = self.commandtest(command)
def testallowchangemetacluster_15(self):
command = ["show_share", "--all"]
out = self.commandtest(command)
# The disk should have moved to utecl13, test_share_1 should be unused on
# utecl1
self.searchoutput(out,
r'Share: test_share_1\s*'
r'Bound to: ESX Cluster utecl1\s*'
r'Server: lnn30f1\s*'
r'Mountpoint: /vol/lnn30f1v1/test_share_1\s*'
r'Disk Count: 0\s*'
r'Machine Count: 0\s*',
command)
self.searchoutput(out,
r'Share: test_share_1\s*'
r'Bound to: ESX Cluster utecl13\s*'
r'Server: lnn30f1\s*'
r'Mountpoint: /vol/lnn30f1v1/test_share_1\s*'
r'Disk Count: 1\s*'
r'Machine Count: 1\s*',
command)
def testallowchangemetacluster_20(self):
command = ["search_machine", "--machine=evm1", "--cluster=utecl13"]
out = self.commandtest(command)
self.matchoutput(out, "evm1", command)
def testallowchangemetacluster_30(self):
command = ["update_machine", "--machine=evm1", "--cluster=utecl1",
"--allow_metacluster_change"]
# restore
out = self.commandtest(command)
def testfailfullcluster(self):
command = ["update_machine", "--machine=evm1", "--cluster=utecl3"]
out = self.badrequesttest(command)
self.matchoutput(out,
"ESX Cluster utecl3 cannot support VMs with "
"0 vmhosts and a down_hosts_threshold of 2",
command)
def testfailaddreadmachinetocluster(self):
command = ["update_machine", "--machine=ut9s03p19", "--cluster=utecl1"]
out = self.badrequesttest(command)
self.matchoutput(out, "Cannot convert a physical machine to virtual.",
command)
# These tests would be nice, but twisted just ignores the permission
# on the files since we're still the owner. Which is good, but means
# the recovery routines can't be easily tested.
# def testfailbrokenplenary(self):
# template = self.plenary_name("machine", "americas", "ut", "ut9",
# "ut9s03p20")
# os.chmod(template, 0000)
# command = ["update_machine", "--machine=ut9s03p20", "--serial=20"]
# out = self.badrequesttest(command)
# self.matchoutput(out, "FIXME", command)
# def testverifyfailbrokenplenary(self):
# # Fixing the previous breakage... not actually necessary for this test.
# template = self.plenary_name("machine", "americas", "ut", "ut9",
# "ut9s03p20")
# os.chmod(template, 0644)
# command = ["show_machine", "--machine=ut9s03p20"]
# out = self.commandtest(command)
# self.matchclean(out, "Serial", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateMachine)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | -6,669,703,521,060,064,000 | 44.546845 | 81 | 0.559464 | false |
ydre/kit-soft | ImageProcessing/soobash/scanconverter.py | 1 | 1674 | __author__ = 'mehdibenchoufi'
from filereader import FileReader
from data import Data
import constants
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
class ScanConverter:
    def get_input(self):
        return self.input
    def set_input(self, value):
        self.input = value
    def get_intermediate_input(self):
        return self.intermediate_input
    def set_intermediate_input(self, value):
        self.intermediate_input = value
    def get_output(self):
        return self.output
    def set_output(self, value):
        self.output = value
def __init__(self, file_reader):
#self.file_reader = FileReader()
self.file_reader = file_reader
self.data = Data()
self.set_io(self.data)
def set_io(self, data):
self.set_input(data.get_src())
self.set_intermediate_input(data.get_intermediate_src())
self.set_output(data.get_destination())
    def converter(self, filereader):
        # Inverse linear-polar warp: map the pre-scan (polar) buffer back into
        # the cartesian output image around the configured centre point.
        cv2.linearPolar(self.intermediate_input,
                        (constants.CENTER_POINT_x, constants.CENTER_POINT_z),
                        constants.SCAN_CONVERTER_SCALE,
                        cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP,
                        self.output)
        cv2.imwrite('color_img.bmp', self.output)
        cv2.imshow('image', self.output)
        cv2.waitKey(0)
def convert(self, filereader):
rows = self.data.get_rows()
cols = self.data.get_cols()
for i in range(0,rows):
for j in range(0,cols):
self.input[i,j] = filereader.pixel_array[i*cols+j]
self.intermediate_input[i,j] = filereader.pixel_array[i*cols+j]
self.converter(filereader)
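# Editor's sketch (not part of the original module): what the cv2.linearPolar
# call above does, run on a synthetic numpy buffer so it needs no FileReader or
# Data pipeline. Sizes, the centre point and the radius are arbitrary.
def _example_linear_polar():
    import numpy as np
    polar = np.zeros((256, 256), dtype=np.uint8)
    polar[:, 64:66] = 255  # a stripe at constant radius in (angle, radius) space
    # With WARP_INVERSE_MAP the stripe maps back to a ring around the centre.
    return cv2.linearPolar(polar, (128, 128), 128,
                           cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)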
| bsd-3-clause | 401,495,650,909,379,650 | 28.892857 | 186 | 0.641577 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7/Lib/test/test_winreg.py | 3 | 17729 | # Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys
import unittest
from test import test_support
threading = test_support.import_module("threading")
from platform import machine
# Do this first so test will be skipped if module doesn't exist
test_support.import_module('_winreg')
# Now import everything
from _winreg import *
try:
REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be.
WIN64_MACHINE = True if machine() == "AMD64" else False
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
# On OS'es that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\Python Test Key - Delete Me"
test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
("Big String", "x"*(2**14-1), REG_SZ),
("Big Binary", "x"*(2**14), REG_BINARY),
]
if test_support.have_unicode:
test_data += [
(unicode("Unicode Val"), unicode("A Unicode value"), REG_SZ,),
("UnicodeExpand", unicode("The path is %path%"), REG_EXPAND_SZ),
("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"),
unicode("values")], REG_MULTI_SZ),
("Multi-mixed", [unicode("Unicode"), unicode("and"), "string",
"values"], REG_MULTI_SZ),
]
class BaseWinregTests(unittest.TestCase):
def setUp(self):
# Make sure that the test key is absent when the test
# starts.
self.delete_tree(HKEY_CURRENT_USER, test_key_name)
def delete_tree(self, root, subkey):
try:
hkey = OpenKey(root, subkey, KEY_ALL_ACCESS)
except WindowsError:
# subkey does not exist
return
while True:
try:
subsubkey = EnumKey(hkey, 0)
except WindowsError:
# no more subkeys
break
self.delete_tree(hkey, subsubkey)
CloseKey(hkey)
DeleteKey(root, subkey)
def _write_test_data(self, root_key, CreateKey=CreateKey):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
# Create a sub-key
sub_key = CreateKey(key, "sub_key")
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
self.assertEquals(nkeys, 1, "Not the correct number of sub keys")
self.assertEquals(nvalues, 1, "Not the correct number of values")
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEquals(nkeys, 0, "Not the correct number of sub keys")
self.assertEquals(nvalues, len(test_data),
"Not the correct number of values")
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
self.fail("It appears the CloseKey() function does "
"not close the actual key!")
except EnvironmentError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
self.fail("It appears the key.Close() function "
"does not close the actual key!")
except EnvironmentError:
pass
def _read_test_data(self, root_key, OpenKey=OpenKey):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
self.assertEquals(val, "Default value",
"Registry didn't give back the correct value")
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
with OpenKey(key, "sub_key") as sub_key:
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except EnvironmentError:
break
self.assertIn(data, test_data,
"Didn't read back the correct test data")
index = index + 1
self.assertEquals(index, len(test_data),
"Didn't read the correct number of items")
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
self.assertEquals(read_val, value_data,
"Could not directly read the value")
self.assertEquals(read_typ, value_type,
"Could not directly read the value")
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
self.assertEquals(read_val, "sub_key", "Read subkey value wrong")
try:
EnumKey(key, 1)
self.fail("Was able to get a second key when I only have one!")
except EnvironmentError:
pass
key.Close()
def _delete_test_data(self, root_key):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEquals(nkeys, 0, "subkey not empty before delete")
self.assertEquals(nvalues, 0, "subkey not empty before delete")
sub_key.Close()
DeleteKey(key, "sub_key")
try:
            # Shouldn't be able to delete it twice!
DeleteKey(key, "sub_key")
self.fail("Deleting the key twice succeeded")
except EnvironmentError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
self.fail("Could open the non-existent key")
except WindowsError: # Use this error name this time
pass
def _test_all(self, root_key):
self._write_test_data(root_key)
self._read_test_data(root_key)
self._delete_test_data(root_key)
class LocalWinregTests(BaseWinregTests):
def test_registry_works(self):
self._test_all(HKEY_CURRENT_USER)
def test_registry_works_extended_functions(self):
# Substitute the regular CreateKey and OpenKey calls with their
# extended counterparts.
# Note: DeleteKeyEx is not used here because it is platform dependent
cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS)
self._write_test_data(HKEY_CURRENT_USER, cke)
oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ)
self._read_test_data(HKEY_CURRENT_USER, oke)
self._delete_test_data(HKEY_CURRENT_USER)
def test_connect_registry_to_local_machine_works(self):
# perform minimal ConnectRegistry test which just invokes it
h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
self.assertNotEqual(h.handle, 0)
h.Close()
self.assertEqual(h.handle, 0)
def test_inexistant_remote_registry(self):
connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER)
self.assertRaises(WindowsError, connect)
def test_expand_environment_strings(self):
r = ExpandEnvironmentStrings(u"%windir%\\test")
self.assertEqual(type(r), unicode)
self.assertEqual(r, os.environ["windir"] + "\\test")
def test_context_manager(self):
# ensure that the handle is closed if an exception occurs
try:
with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h:
self.assertNotEqual(h.handle, 0)
raise WindowsError
except WindowsError:
self.assertEqual(h.handle, 0)
def test_changing_value(self):
# Issue2810: A race condition in 2.6 and 3.1 may cause
# EnumValue or QueryValue to throw "WindowsError: More data is
# available"
done = False
class VeryActiveThread(threading.Thread):
def run(self):
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
use_short = True
long_string = 'x'*2000
while not done:
s = 'x' if use_short else long_string
use_short = not use_short
SetValue(key, 'changing_value', REG_SZ, s)
thread = VeryActiveThread()
thread.start()
try:
with CreateKey(HKEY_CURRENT_USER,
test_key_name+'\\changing_value') as key:
for _ in range(1000):
num_subkeys, num_values, t = QueryInfoKey(key)
for i in range(num_values):
name = EnumValue(key, i)
QueryValue(key, name[0])
finally:
done = True
thread.join()
DeleteKey(HKEY_CURRENT_USER, test_key_name+'\\changing_value')
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_long_key(self):
# Issue2810, in 2.6 and 3.1 when the key name was exactly 256
# characters, EnumKey threw "WindowsError: More data is
# available"
name = 'x'*256
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
SetValue(key, name, REG_SZ, 'x')
num_subkeys, num_values, t = QueryInfoKey(key)
EnumKey(key, 0)
finally:
DeleteKey(HKEY_CURRENT_USER, '\\'.join((test_key_name, name)))
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_dynamic_key(self):
# Issue2810, when the value is dynamically generated, these
# throw "WindowsError: More data is available" in 2.6 and 3.1
EnumValue(HKEY_PERFORMANCE_DATA, 0)
QueryValueEx(HKEY_PERFORMANCE_DATA, None)
# Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
# or DeleteKeyEx so make sure their use raises NotImplementedError
@unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP")
def test_reflection_unsupported(self):
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
self.assertNotEqual(ck.handle, 0)
key = OpenKey(HKEY_CURRENT_USER, test_key_name)
self.assertNotEqual(key.handle, 0)
with self.assertRaises(NotImplementedError):
DisableReflectionKey(key)
with self.assertRaises(NotImplementedError):
EnableReflectionKey(key)
with self.assertRaises(NotImplementedError):
QueryReflectionKey(key)
with self.assertRaises(NotImplementedError):
DeleteKeyEx(HKEY_CURRENT_USER, test_key_name)
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
def test_remote_registry_works(self):
remote_key = ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER)
self._test_all(remote_key)
@unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests")
class Win64WinregTests(BaseWinregTests):
def test_reflection_functions(self):
# Test that we can call the query, enable, and disable functions
# on a key which isn't on the reflection list with no consequences.
with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key:
# HKLM\Software is redirected but not reflected in all OSes
self.assertTrue(QueryReflectionKey(key))
self.assertEquals(None, EnableReflectionKey(key))
self.assertEquals(None, DisableReflectionKey(key))
self.assertTrue(QueryReflectionKey(key))
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_reflection(self):
# Test that we can create, open, and delete keys in the 32-bit
# area. Because we are doing this in a key which gets reflected,
# test the differences of 32 and 64-bit keys before and after the
# reflection occurs (ie. when the created key is closed).
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
self.assertNotEqual(created_key.handle, 0)
# The key should now be available in the 32-bit area
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
# Write a value to what currently is only in the 32-bit area
SetValueEx(created_key, "", 0, REG_SZ, "32KEY")
# The key is not reflected until created_key is closed.
# The 64-bit version of the key should not be available yet.
open_fail = lambda: OpenKey(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(WindowsError, open_fail)
# Now explicitly open the 64-bit version of the key
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key:
self.assertNotEqual(key.handle, 0)
# Make sure the original value we set is there
self.assertEqual("32KEY", QueryValue(key, ""))
# Set a new value, which will get reflected to 32-bit
SetValueEx(key, "", 0, REG_SZ, "64KEY")
            # Reflection uses a "last-writer wins" policy, so the value we set
# on the 64-bit key should be the same on 32-bit
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertEqual("64KEY", QueryValue(key, ""))
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_disable_reflection(self):
# Make use of a key which gets redirected and reflected
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
# QueryReflectionKey returns whether or not the key is disabled
disabled = QueryReflectionKey(created_key)
self.assertEqual(type(disabled), bool)
# HKCU\Software\Classes is reflected by default
self.assertFalse(disabled)
DisableReflectionKey(created_key)
self.assertTrue(QueryReflectionKey(created_key))
# The key is now closed and would normally be reflected to the
# 64-bit area, but let's make sure that didn't happen.
open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(WindowsError, open_fail)
# Make sure the 32-bit key is actually there
with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
def test_main():
test_support.run_unittest(LocalWinregTests, RemoteWinregTests,
Win64WinregTests)
if __name__ == "__main__":
if not REMOTE_NAME:
print "Remote registry calls can be tested using",
print "'test_winreg.py --remote \\\\machine_name'"
test_main()
| mit | -8,097,605,125,872,690,000 | 41.823671 | 79 | 0.585538 | false |
toxygen-project/toxygen | toxygen/list_items.py | 2 | 24327 | from toxcore_enums_and_consts import *
from PyQt5 import QtCore, QtGui, QtWidgets
import profile
from file_transfers import TOX_FILE_TRANSFER_STATE, PAUSED_FILE_TRANSFERS, DO_NOT_SHOW_ACCEPT_BUTTON, ACTIVE_FILE_TRANSFERS, SHOW_PROGRESS_BAR
from util import curr_directory, convert_time, curr_time
from widgets import DataLabel, create_menu
import html as h
import smileys
import settings
import re
class MessageEdit(QtWidgets.QTextBrowser):
def __init__(self, text, width, message_type, parent=None):
super(MessageEdit, self).__init__(parent)
self.urls = {}
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setWordWrapMode(QtGui.QTextOption.WrapAtWordBoundaryOrAnywhere)
self.document().setTextWidth(width)
self.setOpenExternalLinks(True)
self.setAcceptRichText(True)
self.setOpenLinks(False)
path = smileys.SmileyLoader.get_instance().get_smileys_path()
if path is not None:
self.setSearchPaths([path])
self.document().setDefaultStyleSheet('a { color: #306EFF; }')
text = self.decoratedText(text)
if message_type != TOX_MESSAGE_TYPE['NORMAL']:
self.setHtml('<p style="color: #5CB3FF; font: italic; font-size: 20px;" >' + text + '</p>')
else:
self.setHtml(text)
font = QtGui.QFont()
font.setFamily(settings.Settings.get_instance()['font'])
font.setPixelSize(settings.Settings.get_instance()['message_font_size'])
font.setBold(False)
self.setFont(font)
self.resize(width, self.document().size().height())
self.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse | QtCore.Qt.LinksAccessibleByMouse)
self.anchorClicked.connect(self.on_anchor_clicked)
def contextMenuEvent(self, event):
menu = create_menu(self.createStandardContextMenu(event.pos()))
quote = menu.addAction(QtWidgets.QApplication.translate("MainWindow", 'Quote selected text'))
quote.triggered.connect(self.quote_text)
text = self.textCursor().selection().toPlainText()
if not text:
quote.setEnabled(False)
else:
import plugin_support
submenu = plugin_support.PluginLoader.get_instance().get_message_menu(menu, text)
if len(submenu):
plug = menu.addMenu(QtWidgets.QApplication.translate("MainWindow", 'Plugins'))
plug.addActions(submenu)
menu.popup(event.globalPos())
menu.exec_(event.globalPos())
del menu
def quote_text(self):
text = self.textCursor().selection().toPlainText()
if text:
import mainscreen
window = mainscreen.MainWindow.get_instance()
text = '>' + '\n>'.join(text.split('\n'))
if window.messageEdit.toPlainText():
text = '\n' + text
window.messageEdit.appendPlainText(text)
def on_anchor_clicked(self, url):
text = str(url.toString())
if text.startswith('tox:'):
import menu
self.add_contact = menu.AddContact(text[4:])
self.add_contact.show()
else:
QtGui.QDesktopServices.openUrl(url)
self.clearFocus()
def addAnimation(self, url, fileName):
movie = QtGui.QMovie(self)
movie.setFileName(fileName)
self.urls[movie] = url
movie.frameChanged[int].connect(lambda x: self.animate(movie))
movie.start()
def animate(self, movie):
self.document().addResource(QtGui.QTextDocument.ImageResource,
self.urls[movie],
movie.currentPixmap())
self.setLineWrapColumnOrWidth(self.lineWrapColumnOrWidth())
def decoratedText(self, text):
text = h.escape(text) # replace < and >
exp = QtCore.QRegExp(
'('
'(?:\\b)((www\\.)|(http[s]?|ftp)://)'
'\\w+\\S+)'
'|(?:\\b)(file:///)([\\S| ]*)'
'|(?:\\b)(tox:[a-zA-Z\\d]{76}$)'
'|(?:\\b)(mailto:\\S+@\\S+\\.\\S+)'
'|(?:\\b)(tox:\\S+@\\S+)')
offset = exp.indexIn(text, 0)
while offset != -1: # add links
url = exp.cap()
if exp.cap(2) == 'www.':
html = '<a href="http://{0}">{0}</a>'.format(url)
else:
html = '<a href="{0}">{0}</a>'.format(url)
text = text[:offset] + html + text[offset + len(exp.cap()):]
offset += len(html)
offset = exp.indexIn(text, offset)
arr = text.split('\n')
        # quote lines: h.escape() above turned '>' into the 4-char '&gt;',
        # hence the startswith check and the [4:] strip below
        for i in range(len(arr)):
            if arr[i].startswith('&gt;'):
                arr[i] = '<font color="green"><b>' + arr[i][4:] + '</b></font>'
text = '<br>'.join(arr)
text = smileys.SmileyLoader.get_instance().add_smileys_to_text(text, self) # smileys
return text
class MessageItem(QtWidgets.QWidget):
"""
Message in messages list
"""
def __init__(self, text, time, user='', sent=True, message_type=TOX_MESSAGE_TYPE['NORMAL'], parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.name = DataLabel(self)
self.name.setGeometry(QtCore.QRect(2, 2, 95, 23))
self.name.setTextFormat(QtCore.Qt.PlainText)
font = QtGui.QFont()
font.setFamily(settings.Settings.get_instance()['font'])
font.setPointSize(11)
font.setBold(True)
self.name.setFont(font)
self.name.setText(user)
self.time = QtWidgets.QLabel(self)
self.time.setGeometry(QtCore.QRect(parent.width() - 60, 0, 50, 25))
font.setPointSize(10)
font.setBold(False)
self.time.setFont(font)
self._time = time
if not sent:
movie = QtGui.QMovie(curr_directory() + '/images/spinner.gif')
self.time.setMovie(movie)
movie.start()
self.t = True
else:
self.time.setText(convert_time(time))
self.t = False
self.message = MessageEdit(text, parent.width() - 160, message_type, self)
if message_type != TOX_MESSAGE_TYPE['NORMAL']:
self.name.setStyleSheet("QLabel { color: #5CB3FF; }")
self.message.setAlignment(QtCore.Qt.AlignCenter)
self.time.setStyleSheet("QLabel { color: #5CB3FF; }")
self.message.setGeometry(QtCore.QRect(100, 0, parent.width() - 160, self.message.height()))
self.setFixedHeight(self.message.height())
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.RightButton and event.x() > self.time.x():
self.listMenu = QtWidgets.QMenu()
delete_item = self.listMenu.addAction(QtWidgets.QApplication.translate("MainWindow", 'Delete message'))
delete_item.triggered.connect(self.delete)
parent_position = self.time.mapToGlobal(QtCore.QPoint(0, 0))
self.listMenu.move(parent_position)
self.listMenu.show()
def delete(self):
pr = profile.Profile.get_instance()
pr.delete_message(self._time)
def mark_as_sent(self):
if self.t:
self.time.setText(convert_time(self._time))
self.t = False
return True
return False
def set_avatar(self, pixmap):
self.name.setAlignment(QtCore.Qt.AlignCenter)
self.message.setAlignment(QtCore.Qt.AlignVCenter)
self.setFixedHeight(max(self.height(), 36))
self.name.setFixedHeight(self.height())
self.message.setFixedHeight(self.height())
self.name.setPixmap(pixmap.scaled(30, 30, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))
def select_text(self, text):
tmp = self.message.toHtml()
text = h.escape(text)
strings = re.findall(text, tmp, flags=re.IGNORECASE)
for s in strings:
tmp = self.replace_all(tmp, s)
self.message.setHtml(tmp)
@staticmethod
def replace_all(text, substring):
i, l = 0, len(substring)
while i < len(text) - l + 1:
index = text[i:].find(substring)
if index == -1:
break
i += index
lgt, rgt = text[i:].find('<'), text[i:].find('>')
if rgt < lgt:
i += rgt + 1
continue
sub = '<font color="red"><b>{}</b></font>'.format(substring)
text = text[:i] + sub + text[i + l:]
i += len(sub)
return text
class ContactItem(QtWidgets.QWidget):
"""
Contact in friends list
"""
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
mode = settings.Settings.get_instance()['compact_mode']
self.setBaseSize(QtCore.QSize(250, 40 if mode else 70))
self.avatar_label = QtWidgets.QLabel(self)
size = 32 if mode else 64
self.avatar_label.setGeometry(QtCore.QRect(3, 4, size, size))
self.avatar_label.setScaledContents(False)
self.avatar_label.setAlignment(QtCore.Qt.AlignCenter)
self.name = DataLabel(self)
self.name.setGeometry(QtCore.QRect(50 if mode else 75, 3 if mode else 10, 150, 15 if mode else 25))
font = QtGui.QFont()
font.setFamily(settings.Settings.get_instance()['font'])
font.setPointSize(10 if mode else 12)
font.setBold(True)
self.name.setFont(font)
self.status_message = DataLabel(self)
self.status_message.setGeometry(QtCore.QRect(50 if mode else 75, 20 if mode else 30, 170, 15 if mode else 20))
font.setPointSize(10)
font.setBold(False)
self.status_message.setFont(font)
self.connection_status = StatusCircle(self)
self.connection_status.setGeometry(QtCore.QRect(230, -2 if mode else 5, 32, 32))
self.messages = UnreadMessagesCount(self)
self.messages.setGeometry(QtCore.QRect(20 if mode else 52, 20 if mode else 50, 30, 20))
class StatusCircle(QtWidgets.QWidget):
"""
Connection status
"""
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
self.setGeometry(0, 0, 32, 32)
self.label = QtWidgets.QLabel(self)
self.label.setGeometry(QtCore.QRect(0, 0, 32, 32))
self.unread = False
def update(self, status, unread_messages=None):
if unread_messages is None:
unread_messages = self.unread
else:
self.unread = unread_messages
if status == TOX_USER_STATUS['NONE']:
name = 'online'
elif status == TOX_USER_STATUS['AWAY']:
name = 'idle'
elif status == TOX_USER_STATUS['BUSY']:
name = 'busy'
else:
name = 'offline'
if unread_messages:
name += '_notification'
self.label.setGeometry(QtCore.QRect(0, 0, 32, 32))
else:
self.label.setGeometry(QtCore.QRect(2, 0, 32, 32))
pixmap = QtGui.QPixmap(curr_directory() + '/images/{}.png'.format(name))
self.label.setPixmap(pixmap)
class UnreadMessagesCount(QtWidgets.QWidget):
def __init__(self, parent=None):
super(UnreadMessagesCount, self).__init__(parent)
self.resize(30, 20)
self.label = QtWidgets.QLabel(self)
self.label.setGeometry(QtCore.QRect(0, 0, 30, 20))
self.label.setVisible(False)
font = QtGui.QFont()
font.setFamily(settings.Settings.get_instance()['font'])
font.setPointSize(12)
font.setBold(True)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)
color = settings.Settings.get_instance()['unread_color']
self.label.setStyleSheet('QLabel { color: white; background-color: ' + color + '; border-radius: 10; }')
def update(self, messages_count):
color = settings.Settings.get_instance()['unread_color']
self.label.setStyleSheet('QLabel { color: white; background-color: ' + color + '; border-radius: 10; }')
if messages_count:
self.label.setVisible(True)
self.label.setText(str(messages_count))
else:
self.label.setVisible(False)
class FileTransferItem(QtWidgets.QListWidget):
def __init__(self, file_name, size, time, user, friend_number, file_number, state, width, parent=None):
QtWidgets.QListWidget.__init__(self, parent)
self.resize(QtCore.QSize(width, 34))
if state == TOX_FILE_TRANSFER_STATE['CANCELLED']:
self.setStyleSheet('QListWidget { border: 1px solid #B40404; }')
elif state in PAUSED_FILE_TRANSFERS:
self.setStyleSheet('QListWidget { border: 1px solid #FF8000; }')
else:
self.setStyleSheet('QListWidget { border: 1px solid green; }')
self.state = state
self.name = DataLabel(self)
self.name.setGeometry(QtCore.QRect(3, 7, 95, 25))
self.name.setTextFormat(QtCore.Qt.PlainText)
font = QtGui.QFont()
font.setFamily(settings.Settings.get_instance()['font'])
font.setPointSize(11)
font.setBold(True)
self.name.setFont(font)
self.name.setText(user)
self.time = QtWidgets.QLabel(self)
self.time.setGeometry(QtCore.QRect(width - 60, 7, 50, 25))
font.setPointSize(10)
font.setBold(False)
self.time.setFont(font)
self.time.setText(convert_time(time))
self.cancel = QtWidgets.QPushButton(self)
self.cancel.setGeometry(QtCore.QRect(width - 125, 2, 30, 30))
pixmap = QtGui.QPixmap(curr_directory() + '/images/decline.png')
icon = QtGui.QIcon(pixmap)
self.cancel.setIcon(icon)
self.cancel.setIconSize(QtCore.QSize(30, 30))
self.cancel.setVisible(state in ACTIVE_FILE_TRANSFERS)
self.cancel.clicked.connect(lambda: self.cancel_transfer(friend_number, file_number))
self.cancel.setStyleSheet('QPushButton:hover { border: 1px solid #3A3939; background-color: none;}')
self.accept_or_pause = QtWidgets.QPushButton(self)
self.accept_or_pause.setGeometry(QtCore.QRect(width - 170, 2, 30, 30))
if state == TOX_FILE_TRANSFER_STATE['INCOMING_NOT_STARTED']:
self.accept_or_pause.setVisible(True)
self.button_update('accept')
elif state in DO_NOT_SHOW_ACCEPT_BUTTON:
self.accept_or_pause.setVisible(False)
elif state == TOX_FILE_TRANSFER_STATE['PAUSED_BY_USER']: # setup for continue
self.accept_or_pause.setVisible(True)
self.button_update('resume')
else: # pause
self.accept_or_pause.setVisible(True)
self.button_update('pause')
self.accept_or_pause.clicked.connect(lambda: self.accept_or_pause_transfer(friend_number, file_number, size))
self.accept_or_pause.setStyleSheet('QPushButton:hover { border: 1px solid #3A3939; background-color: none}')
self.pb = QtWidgets.QProgressBar(self)
self.pb.setGeometry(QtCore.QRect(100, 7, 100, 20))
self.pb.setValue(0)
self.pb.setStyleSheet('QProgressBar { background-color: #302F2F; }')
self.pb.setVisible(state in SHOW_PROGRESS_BAR)
self.file_name = DataLabel(self)
self.file_name.setGeometry(QtCore.QRect(210, 7, width - 420, 20))
font.setPointSize(12)
self.file_name.setFont(font)
file_size = size // 1024
if not file_size:
file_size = '{}B'.format(size)
elif file_size >= 1024:
file_size = '{}MB'.format(file_size // 1024)
else:
file_size = '{}KB'.format(file_size)
file_data = '{} {}'.format(file_size, file_name)
self.file_name.setText(file_data)
self.file_name.setToolTip(file_name)
self.saved_name = file_name
self.time_left = QtWidgets.QLabel(self)
self.time_left.setGeometry(QtCore.QRect(width - 92, 7, 30, 20))
font.setPointSize(10)
self.time_left.setFont(font)
self.time_left.setVisible(state == TOX_FILE_TRANSFER_STATE['RUNNING'])
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.paused = False
def cancel_transfer(self, friend_number, file_number):
pr = profile.Profile.get_instance()
pr.cancel_transfer(friend_number, file_number)
self.setStyleSheet('QListWidget { border: 1px solid #B40404; }')
self.cancel.setVisible(False)
self.accept_or_pause.setVisible(False)
self.pb.setVisible(False)
def accept_or_pause_transfer(self, friend_number, file_number, size):
if self.state == TOX_FILE_TRANSFER_STATE['INCOMING_NOT_STARTED']:
directory = QtWidgets.QFileDialog.getExistingDirectory(self,
QtWidgets.QApplication.translate("MainWindow", 'Choose folder'),
curr_directory(),
QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontUseNativeDialog)
self.pb.setVisible(True)
if directory:
pr = profile.Profile.get_instance()
pr.accept_transfer(self, directory + '/' + self.saved_name, friend_number, file_number, size)
self.button_update('pause')
elif self.state == TOX_FILE_TRANSFER_STATE['PAUSED_BY_USER']: # resume
self.paused = False
profile.Profile.get_instance().resume_transfer(friend_number, file_number)
self.button_update('pause')
self.state = TOX_FILE_TRANSFER_STATE['RUNNING']
else: # pause
self.paused = True
self.state = TOX_FILE_TRANSFER_STATE['PAUSED_BY_USER']
profile.Profile.get_instance().pause_transfer(friend_number, file_number)
self.button_update('resume')
self.accept_or_pause.clearFocus()
def button_update(self, path):
pixmap = QtGui.QPixmap(curr_directory() + '/images/{}.png'.format(path))
icon = QtGui.QIcon(pixmap)
self.accept_or_pause.setIcon(icon)
self.accept_or_pause.setIconSize(QtCore.QSize(30, 30))
def update_transfer_state(self, state, progress, time):
self.pb.setValue(int(progress * 100))
if time + 1:
m, s = divmod(time, 60)
self.time_left.setText('{0:02d}:{1:02d}'.format(m, s))
if self.state != state and self.state in ACTIVE_FILE_TRANSFERS:
if state == TOX_FILE_TRANSFER_STATE['CANCELLED']:
self.setStyleSheet('QListWidget { border: 1px solid #B40404; }')
self.cancel.setVisible(False)
self.accept_or_pause.setVisible(False)
self.pb.setVisible(False)
self.state = state
self.time_left.setVisible(False)
elif state == TOX_FILE_TRANSFER_STATE['FINISHED']:
self.accept_or_pause.setVisible(False)
self.pb.setVisible(False)
self.cancel.setVisible(False)
self.setStyleSheet('QListWidget { border: 1px solid green; }')
self.state = state
self.time_left.setVisible(False)
elif state == TOX_FILE_TRANSFER_STATE['PAUSED_BY_FRIEND']:
self.accept_or_pause.setVisible(False)
self.setStyleSheet('QListWidget { border: 1px solid #FF8000; }')
self.state = state
self.time_left.setVisible(False)
elif state == TOX_FILE_TRANSFER_STATE['PAUSED_BY_USER']:
self.button_update('resume') # setup button continue
self.setStyleSheet('QListWidget { border: 1px solid green; }')
self.state = state
self.time_left.setVisible(False)
elif state == TOX_FILE_TRANSFER_STATE['OUTGOING_NOT_STARTED']:
self.setStyleSheet('QListWidget { border: 1px solid #FF8000; }')
self.accept_or_pause.setVisible(False)
self.time_left.setVisible(False)
self.pb.setVisible(False)
elif not self.paused: # active
self.pb.setVisible(True)
self.accept_or_pause.setVisible(True) # setup to pause
self.button_update('pause')
self.setStyleSheet('QListWidget { border: 1px solid green; }')
self.state = state
self.time_left.setVisible(True)
def mark_as_sent(self):
return False
class UnsentFileItem(FileTransferItem):
def __init__(self, file_name, size, user, time, width, parent=None):
super(UnsentFileItem, self).__init__(file_name, size, time, user, -1, -1,
TOX_FILE_TRANSFER_STATE['PAUSED_BY_FRIEND'], width, parent)
self._time = time
self.pb.setVisible(False)
movie = QtGui.QMovie(curr_directory() + '/images/spinner.gif')
self.time.setMovie(movie)
movie.start()
def cancel_transfer(self, *args):
pr = profile.Profile.get_instance()
pr.cancel_not_started_transfer(self._time)
class InlineImageItem(QtWidgets.QScrollArea):
def __init__(self, data, width, elem):
QtWidgets.QScrollArea.__init__(self)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self._elem = elem
self._image_label = QtWidgets.QLabel(self)
self._image_label.raise_()
self.setWidget(self._image_label)
self._image_label.setScaledContents(False)
self._pixmap = QtGui.QPixmap()
self._pixmap.loadFromData(data, 'PNG')
self._max_size = width - 30
self._resize_needed = not (self._pixmap.width() <= self._max_size)
self._full_size = not self._resize_needed
if not self._resize_needed:
self._image_label.setPixmap(self._pixmap)
self.resize(QtCore.QSize(self._max_size + 5, self._pixmap.height() + 5))
self._image_label.setGeometry(5, 0, self._pixmap.width(), self._pixmap.height())
else:
pixmap = self._pixmap.scaled(self._max_size, self._max_size, QtCore.Qt.KeepAspectRatio)
self._image_label.setPixmap(pixmap)
self.resize(QtCore.QSize(self._max_size + 5, pixmap.height()))
self._image_label.setGeometry(5, 0, self._max_size + 5, pixmap.height())
self._elem.setSizeHint(QtCore.QSize(self.width(), self.height()))
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton and self._resize_needed: # scale inline
if self._full_size:
pixmap = self._pixmap.scaled(self._max_size, self._max_size, QtCore.Qt.KeepAspectRatio)
self._image_label.setPixmap(pixmap)
self.resize(QtCore.QSize(self._max_size, pixmap.height()))
self._image_label.setGeometry(5, 0, pixmap.width(), pixmap.height())
else:
self._image_label.setPixmap(self._pixmap)
self.resize(QtCore.QSize(self._max_size, self._pixmap.height() + 17))
self._image_label.setGeometry(5, 0, self._pixmap.width(), self._pixmap.height())
self._full_size = not self._full_size
self._elem.setSizeHint(QtCore.QSize(self.width(), self.height()))
elif event.button() == QtCore.Qt.RightButton: # save inline
directory = QtWidgets.QFileDialog.getExistingDirectory(self,
QtWidgets.QApplication.translate("MainWindow",
'Choose folder'),
curr_directory(),
QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontUseNativeDialog)
if directory:
fl = QtCore.QFile(directory + '/toxygen_inline_' + curr_time().replace(':', '_') + '.png')
self._pixmap.save(fl, 'PNG')
def mark_as_sent(self):
return False
| gpl-3.0 | 6,860,497,635,885,465,000 | 43.636697 | 146 | 0.591688 | false |
ShadowKyogre/mypaint | gui/factoryaction.py | 4 | 3718 | # This file is part of MyPaint.
# Copyright (C) 2013 by Andrew Chadwick <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Factory for creating custom toolbar and manu items via UIManager"""
from __future__ import division, print_function
from warnings import warn
import gi
from gi.repository import Gtk
from gi.repository import GObject
class FactoryAction (Gtk.Action):
"""Generic factory action for UI components.
Define an instance of the factory once for each family of custom proxy
classes (GtkToolItem, GtkMenuItem etc.) that you need. Each instance must
be named after the custom proxy classes the factory is to produce, so the
    definition is best done in a central snippet of GtkBuilder XML.
For example, if the factory is named ``FooBar``, then its proxy ToolItems
    are expected to have ``__gtype_name__``s of ``MyPaintFooBarToolItem``.
Creation of proxies can then be done cleanly in the GtkUIManager XML
definitions by referring to the factory's name as many times as needed.
"""
#: Predictable name for GtkBuilder XML.
__gtype_name__ = "MyPaintFactoryAction"
#: The pattern to use when instantiating a tool item
TOOL_ITEM_NAME_PATTERN = "MyPaint%sToolItem"
#: The pattern to use when instantiating a menu item
MENU_ITEM_NAME_PATTERN = "MyPaint%sMenuItem"
def __init__(self, *a):
# GtkAction's own constructor requires params which are all set up by
# Builder. It warns noisily, so bypass it and invoke its parent
# class's.
super(Gtk.Action, self).__init__()
def do_create_tool_item(self):
"""Returns a new ToolItem
Invoked by UIManager when it needs a GtkToolItem proxy for a toolbar.
This method instantiates and returns a new widget from a class named
after the factory action's own name. Class lookup is done via GObject:
see `TOOL_ITEM_NAME_PATTERN` for the ``__gtype_name__`` this method
will expect.
"""
gtype_name = self.TOOL_ITEM_NAME_PATTERN % (self.get_name(),)
tool_item = self._construct(gtype_name)
tool_item.connect("parent-set", self._tool_item_parent_set)
return tool_item
def do_create_menu_item(self):
"""Returns a new MenuItem
Invoked by UIManager when it needs a MenuItem proxy for a menu.
This method instantiates and returns a new widget from a class named
after the factory action's own name. Class lookup is done via GObject:
        see `MENU_ITEM_NAME_PATTERN` for the ``__gtype_name__`` this method
will expect.
"""
gtype_name = self.MENU_ITEM_NAME_PATTERN % (self.get_name(),)
menu_item = self._construct(gtype_name)
#menu_item.connect("parent-set", self._tool_item_parent_set)
return menu_item
def _construct(self, gtype_name):
try:
gtype = GObject.type_from_name(gtype_name)
except RuntimeError:
warn("Cannot construct a new %s: not loaded?" % (gtype_name,),
RuntimeWarning)
return None
if not gtype.is_a(Gtk.Widget):
warn("%s is not a Gtk.Widget subclass" % (gtype_name,),
RuntimeWarning)
return None
widget = gtype.pytype()
return widget
def _tool_item_parent_set(self, widget, old_parent):
parent = widget.get_parent()
if parent and parent.get_visible():
widget.show_all()
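# Usage sketch (illustrative; not from the MyPaint resource files). A factory named "FooBar"
# would typically be declared once in GtkBuilder XML, roughly:
#
#     <object class="MyPaintFactoryAction" id="FooBar"/>
#
# and referenced from GtkUIManager XML wherever a proxy widget is needed:
#
#     <toolbar name="SomeToolbar"><toolitem action="FooBar"/></toolbar>
#
# UIManager then calls do_create_tool_item(), which instantiates whichever class registered
# the __gtype_name__ "MyPaintFooBarToolItem". All names above are placeholders.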
| gpl-2.0 | 1,078,321,670,498,157,000 | 35.811881 | 79 | 0.661108 | false |
kwierman/osf.io | website/addons/twofactor/models.py | 34 | 2189 | from base64 import b32encode
from binascii import unhexlify
from random import SystemRandom
from modularodm.fields import BooleanField, StringField, IntegerField
from oath import accept_totp
from website.addons.base import AddonUserSettingsBase
class TwoFactorUserSettings(AddonUserSettingsBase):
totp_secret = StringField() # hexadecimal
totp_drift = IntegerField()
is_confirmed = BooleanField(default=False)
@property
def totp_secret_b32(self):
return b32encode(unhexlify(self.totp_secret))
@property
def otpauth_url(self):
return 'otpauth://totp/OSF:{}?secret={}'.format(self.owner.username,
self.totp_secret_b32)
def to_json(self, user):
rv = super(TwoFactorUserSettings, self).to_json(user)
rv.update({
'is_enabled': True,
'is_confirmed': self.is_confirmed,
'secret': self.totp_secret_b32,
'drift': self.totp_drift,
})
return rv
###################
# Utility methods #
###################
def verify_code(self, code):
accepted, drift = accept_totp(key=self.totp_secret,
response=code,
drift=self.totp_drift)
if accepted:
self.totp_drift = drift
return True
return False
#############
# Callbacks #
#############
def on_add(self):
super(TwoFactorUserSettings, self).on_add()
self.totp_secret = _generate_seed()
self.totp_drift = 0
self.is_confirmed = False
def on_delete(self):
super(TwoFactorUserSettings, self).on_delete()
self.totp_secret = None
self.totp_drift = 0
self.is_confirmed = False
def _generate_seed():
"""Generate a new random seed
The length of the returned string will be a multiple of two, and
stripped of type specifier "0x" that `hex()` prepends.
:return str: A random, padded hex value
"""
x = SystemRandom().randint(0, 32 ** 16 - 1)
h = hex(x).strip('L')[2:]
if len(h) % 2:
h = '0' + h
return h
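# Usage sketch (illustrative; not from the OSF codebase). Typical lifecycle for a saved
# TwoFactorUserSettings instance `settings`:
#
#     settings.on_add()                    # generates totp_secret, resets drift/confirmation
#     url = settings.otpauth_url           # provision an authenticator app (e.g. via QR code)
#     if settings.verify_code('123456'):   # code read back from the user's device
#         settings.is_confirmed = True
#
# The six-digit code is a placeholder, not a real TOTP value.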
| apache-2.0 | 5,361,219,291,257,130,000 | 27.802632 | 77 | 0.570123 | false |
traveloka/ansible | lib/ansible/modules/cloud/docker/docker_network.py | 12 | 13146 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
module: docker_network
version_added: "2.2"
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
required: true
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
default: null
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
default: null
force:
description:
- With state I(absent) forces disconnecting all containers from the
network prior to deleting the network. With state I(present) will
disconnect all containers, delete the network and re-create the
network. This option is required if you have changed the IPAM or
driver options and want an existing network to be updated to use the
new options.
default: false
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
Use C(appends) to leave existing containers connected.
default: false
aliases:
- incremental
ipam_driver:
description:
- Specify an IPAM driver.
default: null
ipam_options:
description:
- Dictionary of IPAM options.
default: null
state:
description:
- I(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the C(force) option to disconnect all containers
and delete the network.
- I(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
C(appends) option to leave existing containers connected. Use the C(force)
options to force re-creation of the network.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
authors:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a network
docker_network:
name: network_one
- name: Remove all but selected list of containers
docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with options
docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
ipam_options:
subnet: '172.3.26.0/16'
gateway: 172.3.26.1
iprange: '192.168.1.0/24'
- name: Delete a network, disconnecting all containers
docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
facts:
description: Network inspection results for the affected network.
returned: success
type: complex
sample: {}
'''
from ansible.module_utils.docker_common import *
try:
from docker import utils
from docker.utils.types import Ulimit
except:
# missing docker-py handled in ansible.module_utils.docker
pass
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.network_name = None
self.connected = None
self.driver = None
self.driver_options = None
self.ipam_driver = None
self.ipam_options = None
self.appends = None
self.force = None
self.debug = None
for key, value in client.module.params.items():
setattr(self, key, value)
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()]
class DockerNetworkManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.existing_network = self.get_existing_network()
if not self.parameters.connected and self.existing_network:
self.parameters.connected = container_names_in_network(self.existing_network)
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
def get_existing_network(self):
networks = self.client.networks()
network = None
for n in networks:
if n['Name'] == self.parameters.network_name:
network = n
return network
def has_different_config(self, net):
'''
Evaluates an existing network and returns a tuple containing a boolean
indicating if the configuration is different and a list of differences.
:param net: the inspection output for an existing network
:return: (bool, list)
'''
different = False
differences = []
if self.parameters.driver and self.parameters.driver != net['Driver']:
different = True
differences.append('driver')
if self.parameters.driver_options:
if not net.get('Options'):
different = True
differences.append('driver_options')
else:
for key, value in self.parameters.driver_options.items():
if not net['Options'].get(key) or value != net['Options'][key]:
different = True
differences.append('driver_options.%s' % key)
if self.parameters.ipam_driver:
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
different = True
differences.append('ipam_driver')
if self.parameters.ipam_options:
if not net.get('IPAM') or not net['IPAM'].get('Config'):
different = True
differences.append('ipam_options')
else:
for key, value in self.parameters.ipam_options.items():
camelkey = None
for net_key in net['IPAM']['Config'][0]:
if key == net_key.lower():
camelkey = net_key
break
if not camelkey:
# key not found
different = True
differences.append('ipam_options.%s' % key)
elif net['IPAM']['Config'][0].get(camelkey) != value:
# key has different value
different = True
differences.append('ipam_options.%s' % key)
return different, differences
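    # Illustration (not part of the original module): for an existing network created with
    # different options this returns something like
    #     (True, ['driver_options.com.docker.network.bridge.name', 'ipam_options.subnet'])
    # and present() below combines the boolean with the `force` parameter to decide whether
    # the network has to be re-created.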
def create_network(self):
if not self.existing_network:
ipam_pools = []
if self.parameters.ipam_options:
ipam_pools.append(utils.create_ipam_pool(**self.parameters.ipam_options))
ipam_config = utils.create_ipam_config(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
if not self.check_mode:
resp = self.client.create_network(self.parameters.network_name,
driver=self.parameters.driver,
options=self.parameters.driver_options,
ipam=ipam_config)
self.existing_network = self.client.inspect_network(resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.network_name, self.parameters.driver))
self.results['changed'] = True
def remove_network(self):
if self.existing_network:
self.disconnect_all_containers()
if not self.check_mode:
self.client.remove_network(self.parameters.network_name)
self.results['actions'].append("Removed network %s" % (self.parameters.network_name,))
self.results['changed'] = True
def is_container_connected(self, container_name):
return container_name in container_names_in_network(self.existing_network)
def connect_containers(self):
for name in self.parameters.connected:
if not self.is_container_connected(name):
if not self.check_mode:
self.client.connect_container_to_network(name, self.parameters.network_name)
self.results['actions'].append("Connected container %s" % (name,))
self.results['changed'] = True
def disconnect_missing(self):
for c in self.existing_network['Containers'].values():
name = c['Name']
if name not in self.parameters.connected:
self.disconnect_container(name)
def disconnect_all_containers(self):
containers = self.client.inspect_network(self.parameters.network_name)['Containers']
for cont in containers.values():
self.disconnect_container(cont['Name'])
def disconnect_container(self, container_name):
if not self.check_mode:
self.client.disconnect_container_from_network(container_name, self.parameters.network_name)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['changed'] = True
def present(self):
different = False
differences = []
if self.existing_network:
different, differences = self.has_different_config(self.existing_network)
if self.parameters.force or different:
self.remove_network()
self.existing_network = None
self.create_network()
self.connect_containers()
if not self.parameters.appends:
self.disconnect_missing()
if self.diff or self.check_mode or self.parameters.debug:
self.results['diff'] = differences
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
self.results['ansible_facts'] = {u'ansible_docker_network': self.get_existing_network()}
def absent(self):
self.remove_network()
def main():
argument_spec = dict(
network_name = dict(type='str', required=True, aliases=['name']),
connected = dict(type='list', default=[], aliases=['containers']),
state = dict(type='str', default='present', choices=['present', 'absent']),
driver = dict(type='str', default='bridge'),
driver_options = dict(type='dict', default={}),
force = dict(type='bool', default=False),
appends = dict(type='bool', default=False, aliases=['incremental']),
ipam_driver = dict(type='str', default=None),
ipam_options = dict(type='dict', default={}),
debug = dict(type='bool', default=False)
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True
)
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 462,739,406,585,472,900 | 33.413613 | 136 | 0.609767 | false |
matthagy/Jamenson | jamenson/tests/runtime/atypes/etypes.py | 1 | 1969 |
import unittest
from jamenson.runtime.atypes import *
from jamenson.runtime.atypes.etypes import *
class TestETypes(unittest.TestCase):
def chkt(self, tp, op):
self.failUnless(typep(op, tp), "%r is not of type %s" % (op, tp))
def chkf(self, tp, op):
self.failIf(typep(op, tp), "%r is of type %s" % (op, tp))
def chktseq(self, tp, seq):
for el in seq:
self.chkt(tp, el)
def chkfseq(self, tp, seq):
for el in seq:
self.chkf(tp, el)
evens = [0, 2L, -10, 20L, 1<<40]
odds = [1,-1,-1001,1001, (1<<40) + 1]
floats = map(float, evens + odds)
non_numbers = ["0","1","2","adfadf", object(), [], range(10),
set(), dict()]
def testeven(self):
self.chktseq(even_type, self.evens)
self.chkfseq(even_type, self.odds)
self.chkfseq(even_type, self.floats)
self.chkfseq(even_type, self.non_numbers)
def testodd(self):
self.chktseq(odd_type, self.odds)
self.chkfseq(odd_type, self.evens)
self.chkfseq(odd_type, self.floats)
self.chkfseq(odd_type, self.non_numbers)
def testzero(self):
self.chktseq(zero_type, [0, 0L, 0.0, 0j+0])
self.chkfseq(zero_type, [1,1L,1.0,1j+1,43,12L] + self.non_numbers)
def testpositive(self):
self.chktseq(positive_type, [5, 3, 12L, 1.0])
self.chkfseq(positive_type, [0, -1, -10L, -43.54, 5j, -5j] +
self.non_numbers)
def testnegative(self):
self.chktseq(negative_type, [-5, -3, -12L, -1.0])
self.chkfseq(negative_type, [0, 1, 10L, 43.54, 5j, -5j] +
self.non_numbers)
def testiter(self):
self.chktseq(iterable_type, [(), [], {}, set(), "", xrange(10),
(x for x in xrange(10))])
self.chkfseq(iterable_type, [1, None, lambda x : x])
__name__ == '__main__' and unittest.main()
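# Illustration (not part of the original test module): the predicates exercised above are
# used directly as, e.g.
#     typep(10, even_type)      -> True
#     typep(-3, negative_type)  -> True
#     typep("x", iterable_type) -> True
# mirroring the assertions in TestETypes.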
| apache-2.0 | 7,941,851,142,259,293,000 | 30.253968 | 74 | 0.53936 | false |
AlexanderKaluzhny/instanotifier | instanotifier/api/notification/filters.py | 1 | 1970 | from django_filters import rest_framework as filters
from django.core.validators import EMPTY_VALUES
from instanotifier.notification.models import Ratings
class RatingDownvotedFilter(filters.BooleanFilter):
def filter(self, qs, value):
if not value or value in EMPTY_VALUES:
return qs
return qs.exclude(**{self.field_name: Ratings.DOWNVOTED})
class ShowOnlyFilterBase(filters.CharFilter):
ALLOWED_VALUES = ["all", "upvoted", "downvoted", "bookmarked"]
def filter(self, qs, value):
raise NotImplementedError()
class ShowOnlyFilter(ShowOnlyFilterBase):
def filter(self, qs, value):
if not value or value in EMPTY_VALUES:
return qs
if value == "all":
return qs
elif value == "upvoted":
return qs.filter(rating=Ratings.UPVOTED)
elif value == "downvoted":
return qs.filter(rating=Ratings.DOWNVOTED)
elif value == "bookmarked":
return qs.filter(is_bookmarked=True)
return qs
class NotificationFilter(filters.FilterSet):
published_parsed__date = filters.DateFilter(field_name="published_parsed", lookup_expr="date")
exclude_downvoted = RatingDownvotedFilter(field_name="rating")
show_only = ShowOnlyFilter()
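# Usage sketch (illustrative; the URL prefix is a placeholder, the real route lives in the
# project's urlconf). With NotificationFilter set as a DRF view's `filterset_class`,
# requests such as
#
#     /api/notifications/?show_only=bookmarked
#     /api/notifications/?published_parsed__date=2021-06-01&exclude_downvoted=true
#
# narrow the queryset; `show_only` accepts all/upvoted/downvoted/bookmarked as handled above.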
class DatesShowOnlyFilter(ShowOnlyFilterBase):
"""
Based on the `daily_posted_ratings` results, filters days that have
"upvoted", "downvoted", "bookmarked" notifications.
"""
def filter(self, qs, value):
if not value or value in EMPTY_VALUES:
return qs
if value == "all":
return qs
elif value == "upvoted":
return qs.filter(upvoted__gt=0)
elif value == "downvoted":
return qs.filter(downvoted__gt=0)
elif value == "bookmarked":
return qs.filter(bookmarked__gt=0)
return qs
class DatesFilter(filters.FilterSet):
show_only = DatesShowOnlyFilter()
| mit | -7,402,247,083,607,435,000 | 27.970588 | 98 | 0.648223 | false |
pgroudas/pants | src/python/pants/backend/jvm/targets/jvm_binary.py | 1 | 14476 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
from hashlib import sha1
from six import string_types
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import (ExcludesField, FingerprintedField, FingerprintedMixin,
PrimitiveField)
from pants.base.validation import assert_list
from pants.util.meta import AbstractClass
class JarRule(FingerprintedMixin, AbstractClass):
def __init__(self, apply_pattern, payload=None):
self.payload = payload or Payload()
if not isinstance(apply_pattern, string_types):
raise ValueError('The supplied apply_pattern is not a string, given: {}'
.format(apply_pattern))
try:
self._apply_pattern = re.compile(apply_pattern)
except re.error as e:
raise ValueError('The supplied apply_pattern: {pattern} '
'is not a valid regular expression: {msg}'
.format(pattern=apply_pattern, msg=e))
self.payload.add_fields({
'apply_pattern' : PrimitiveField(apply_pattern),
})
def fingerprint(self):
return self.payload.fingerprint()
@property
def apply_pattern(self):
"""The pattern that matches jar entry paths this rule applies to.
:rtype: re.RegexObject
"""
return self._apply_pattern
class Skip(JarRule):
"""A rule that skips adding matched entries to a jar."""
def __repr__(self):
return "Skip(apply_pattern={})".format(self.payload.apply_pattern)
class Duplicate(JarRule):
"""A rule that indicates how duplicate entries should be handled when building a jar."""
class Error(Exception):
"""Raised by the ``FAIL`` action when a duplicate entry is encountered"""
def __init__(self, path):
"""Creates a duplicate entry error for the given path.
:param str path: The path of the duplicate entry.
"""
assert path and isinstance(path, string_types), 'A non-empty path must be supplied.'
super(Duplicate.Error, self).__init__('Duplicate entry encountered for path {}'.format(path))
self._path = path
@property
def path(self):
"""The path of the duplicate entry."""
return self._path
SKIP = 'SKIP'
"""Retains the 1st entry and skips subsequent duplicates."""
REPLACE = 'REPLACE'
"""Retains the most recent entry and skips prior duplicates."""
CONCAT = 'CONCAT'
"""Concatenates the contents of all duplicate entries encountered in the order encountered."""
FAIL = 'FAIL'
"""Raises a :class:``Duplicate.Error`` when a duplicate entry is
encountered.
"""
_VALID_ACTIONS = frozenset((SKIP, REPLACE, CONCAT, FAIL))
@classmethod
def validate_action(cls, action):
"""Verifies the given action is a valid duplicate jar rule action.
:returns: The action if it is valid.
:raises: ``ValueError`` if the action is invalid.
"""
if action not in cls._VALID_ACTIONS:
raise ValueError('The supplied action must be one of {valid}, given: {given}'
.format(valid=cls._VALID_ACTIONS, given=action))
return action
def __init__(self, apply_pattern, action):
"""Creates a rule for handling duplicate jar entries.
:param str apply_pattern: A regular expression that matches duplicate jar entries this rule
applies to.
:param action: An action to take to handle one or more duplicate entries. Must be one of:
``Duplicate.SKIP``, ``Duplicate.REPLACE``, ``Duplicate.CONCAT`` or ``Duplicate.FAIL``.
"""
payload = Payload()
payload.add_fields({
'action' : PrimitiveField(self.validate_action(action)),
})
super(Duplicate, self).__init__(apply_pattern, payload=payload)
@property
def action(self):
"""The action to take for any duplicate entries that match this rule's ``apply_pattern``."""
return self.payload.action
def fingerprint(self):
return self.payload.fingerprint()
def __repr__(self):
return "Duplicate(apply_pattern={0}, action={1})".format(self.payload.apply_pattern,
self.payload.action)
class JarRules(FingerprintedMixin):
"""A set of rules for packaging up a deploy jar.
Deploy jars are executable jars with fully self-contained classpaths and as such, assembling them
presents problems given jar semantics.
One issue is signed jars that must be included on the
classpath. These have a signature that depends on the jar contents and assembly of the deploy jar
changes the content of the jar, breaking the signatures. For cases like these the signed jars
must be verified and then the signature information thrown away. The `Skip <#Skip>`_
rule supports this sort of issue by allowing outright entry exclusion in the final deploy jar.
Another issue is duplicate jar entries. Although the underlying zip format supports these, the
java jar tool and libraries do not. As such some action must be taken for each duplicate entry
such that there are no duplicates in the final deploy jar. The four
`Duplicate <#Duplicate>`_ rules support resolution of these cases by allowing 1st wins,
last wins, concatenation of the duplicate entry contents or raising an exception.
"""
@classmethod
def skip_signatures_and_duplicates_concat_well_known_metadata(cls, default_dup_action=None,
additional_rules=None):
"""Produces a rule set useful in many deploy jar creation contexts.
The rule set skips duplicate entries by default, retaining the 1st encountered. In addition it
has the following special handling:
- jar signature metadata is dropped
- ``java.util.ServiceLoader`` provider-configuration files are concatenated in the order
encountered
:param default_dup_action: An optional default action to take for duplicates. Defaults to
`Duplicate.SKIP` if not specified.
:param additional_rules: Optionally one or more jar rules to add to those described above.
:returns: JarRules
"""
default_dup_action = Duplicate.validate_action(default_dup_action or Duplicate.SKIP)
additional_rules = assert_list(additional_rules, expected_type=(Duplicate, Skip))
rules = [Skip(r'^META-INF/[^/]+\.SF$'), # signature file
Skip(r'^META-INF/[^/]+\.DSA$'), # default signature alg. file
Skip(r'^META-INF/[^/]+\.RSA$'), # default signature alg. file
Duplicate(r'^META-INF/services/', Duplicate.CONCAT)] # 1 svc fqcn per line
return JarRules(rules=rules + additional_rules, default_dup_action=default_dup_action)
_DEFAULT = None
@classmethod
def default(cls):
"""Returns the default set of jar rules.
Can be set with `set_default` but otherwise defaults to
`skip_signatures_and_duplicates_concat_well_known_metadata`.
"""
if cls._DEFAULT is None:
cls._DEFAULT = cls.skip_signatures_and_duplicates_concat_well_known_metadata()
return cls._DEFAULT
@classmethod
def set_default(cls, rules):
"""Sets the default site-wide jar rules."""
if not isinstance(rules, JarRules):
raise ValueError('The default rules must be a JarRules instance.')
cls._DEFAULT = rules
def __init__(self, rules=None, default_dup_action=Duplicate.SKIP):
"""Creates a new set of jar rules with the default duplicate action of ``Duplicate.SKIP``.
:param rules: One or more rules that will be applied in order to jar entries being packaged in
a deploy jar. `Skip <#Skip>`_ rules can go here.
:param default_dup_action: The default action to take when a duplicate entry is encountered and
no explicit rules apply to the entry.
"""
self.payload = Payload()
self.payload.add_fields({
'default_dup_action' : PrimitiveField(Duplicate.validate_action(default_dup_action))
})
self._rules = assert_list(rules, expected_type=JarRule)
@property
def default_dup_action(self):
"""The default action to take when a duplicate jar entry is encountered."""
return self.payload.default_dup_action
@property
def rules(self):
"""A copy of the list of explicit entry rules in effect."""
return list(self._rules)
def fingerprint(self):
hasher = sha1()
hasher.update(self.payload.fingerprint())
for rule in self.rules:
hasher.update(rule.fingerprint())
return hasher.hexdigest()
@property
def value(self):
    return self._rules  # the rule list is stored as _rules in __init__
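# Usage sketch (illustrative; not from the pants source). A rule set can be built directly
# from the classes above, e.g. for a service-heavy deploy jar:
#
#     rules = JarRules(rules=[Skip(r'^META-INF/[^/]+\.SF$'),
#                             Duplicate(r'^META-INF/services/', Duplicate.CONCAT)],
#                      default_dup_action=Duplicate.FAIL)
#
# or start from JarRules.skip_signatures_and_duplicates_concat_well_known_metadata() and
# supply additional_rules for project-specific entries.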
class ManifestEntries(FingerprintedMixin):
"""Describes additional items to add to the app manifest."""
class ExpectedDictionaryError(Exception):
pass
def __init__(self, entries=None):
"""
:param entries: Additional headers, value pairs to add to the MANIFEST.MF.
You can just add fixed string header / value pairs.
:type entries: dictionary of string : string
"""
self.payload = Payload()
if entries:
if not isinstance(entries, dict):
raise self.ExpectedDictionaryError("entries must be a dictionary of strings.")
for key in entries.keys():
if not isinstance(key, string_types):
raise self.ExpectedDictionaryError(
"entries must be dictionary of strings, got key {} type {}"
.format(key, type(key).__name__))
self.payload.add_fields({
'entries' : PrimitiveField(entries or {}),
})
def fingerprint(self):
return self.payload.fingerprint()
@property
def entries(self):
return self.payload.entries
class JvmBinary(JvmTarget):
"""Produces a JVM binary optionally identifying a launcher main class.
Below are a summary of how key goals affect targets of this type:
* ``bundle`` - Creates a self-contained directory with the binary and all
its dependencies, optionally archived, suitable for deployment.
* ``binary`` - Create an executable jar of the binary. On the JVM
this means the jar has a manifest specifying the main class.
* ``run`` - Executes the main class of this binary locally.
"""
def __init__(self,
name=None,
address=None,
payload=None,
main=None,
basename=None,
source=None,
deploy_excludes=None,
deploy_jar_rules=None,
manifest_entries=None,
**kwargs):
"""
:param string main: The name of the ``main`` class, e.g.,
``'org.pantsbuild.example.hello.main.HelloMain'``. This class may be
present as the source of this target or depended-upon library.
:param string basename: Base name for the generated ``.jar`` file, e.g.,
``'hello'``. (By default, uses ``name`` param)
:param string source: Name of one ``.java`` or ``.scala`` file (a good
place for a ``main``).
:param sources: Overridden by source. If you want more than one source
file, use a library and have the jvm_binary depend on that library.
:param resources: List of ``resource``\s to include in bundle.
:param dependencies: Targets (probably ``java_library`` and
``scala_library`` targets) to "link" in.
:type dependencies: list of target specs
:param deploy_excludes: List of `exclude <#exclude>`_\s to apply
at deploy time.
If you, for example, deploy a java servlet that has one version of
``servlet.jar`` onto a Tomcat environment that provides another version,
they might conflict. ``deploy_excludes`` gives you a way to build your
code but exclude the conflicting ``jar`` when deploying.
:param deploy_jar_rules: `Jar rules <#jar_rules>`_ for packaging this binary in a
deploy jar.
:param manifest_entries: dict that specifies entries for `ManifestEntries <#manifest_entries>`_
for adding to MANIFEST.MF when packaging this binary.
:param configurations: Ivy configurations to resolve for this target.
This parameter is not intended for general use.
:type configurations: tuple of strings
"""
self.address = address # Set in case a TargetDefinitionException is thrown early
if main and not isinstance(main, string_types):
raise TargetDefinitionException(self, 'main must be a fully qualified classname')
if source and not isinstance(source, string_types):
raise TargetDefinitionException(self, 'source must be a single relative file path')
if deploy_jar_rules and not isinstance(deploy_jar_rules, JarRules):
raise TargetDefinitionException(self,
'deploy_jar_rules must be a JarRules specification. got {}'
.format(type(deploy_jar_rules).__name__))
if manifest_entries and not isinstance(manifest_entries, dict):
raise TargetDefinitionException(self,
'manifest_entries must be a dict. got {}'
.format(type(manifest_entries).__name__))
sources = [source] if source else None
payload = payload or Payload()
payload.add_fields({
'basename' : PrimitiveField(basename or name),
'deploy_excludes' : ExcludesField(self.assert_list(deploy_excludes, expected_type=Exclude)),
'deploy_jar_rules' : FingerprintedField(deploy_jar_rules or JarRules.default()),
'manifest_entries' : FingerprintedField(ManifestEntries(manifest_entries)),
'main': PrimitiveField(main),
})
super(JvmBinary, self).__init__(name=name,
address=address,
payload=payload,
sources=self.assert_list(sources),
**kwargs)
@property
def basename(self):
return self.payload.basename
@property
def deploy_excludes(self):
return self.payload.deploy_excludes
@property
def deploy_jar_rules(self):
return self.payload.deploy_jar_rules
@property
def main(self):
return self.payload.main
@property
def manifest_entries(self):
return self.payload.manifest_entries
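# Usage sketch (illustrative; not from the pants source). In a BUILD file this target type
# is normally declared along the lines of:
#
#     jvm_binary(
#       name='hello-bin',
#       main='org.pantsbuild.example.hello.main.HelloMain',
#       dependencies=[':hello-lib'],
#       deploy_excludes=[exclude(org='javax.servlet', name='servlet-api')],
#     )
#
# Target and dependency names are placeholders; `jvm_binary` and `exclude` are the BUILD-file
# aliases for the classes defined in this module.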
| apache-2.0 | 330,148,365,574,405,400 | 38.769231 | 100 | 0.670696 | false |
ktosiek/spacewalk | client/tools/rhncfg/config_management/rhncfg-manager.py | 2 | 1360 | #!/usr/bin/python
#
# Copyright (c) 2008 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
RHNROOT = '/usr/share/rhn'
import sys
if RHNROOT not in sys.path:
sys.path.append(RHNROOT)
from config_common.rhn_main import BaseMain
class Main(BaseMain):
modes = [
'add',
'create-channel',
'diff',
'diff-revisions',
'download-channel',
'get',
'list',
'list-channels',
'remove',
'remove-channel',
'revisions',
'update',
'upload-channel',
]
plugins_dir = 'config_management'
config_section = 'rhncfg-manager'
mode_prefix = 'rhncfg'
if __name__ == '__main__':
try:
sys.exit(Main().main() or 0)
except KeyboardInterrupt:
sys.stderr.write("user interrupted\n")
sys.exit(0)
| gpl-2.0 | -1,645,302,312,042,023,000 | 26.2 | 73 | 0.644853 | false |
etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/pip/_internal/utils/entrypoints.py | 20 | 1152 | import sys
from pip._internal.cli.main import main
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
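# Note (illustrative; not part of pip itself): this wrapper is the kind of target a legacy
# console-script shim would point at, e.g. a hypothetical setup.py entry
#
#     entry_points={"console_scripts": ["pip-legacy = pip._internal.utils.entrypoints:_wrapper"]}
#
# The script name is made up; only the warn-then-delegate behaviour above is real.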
| gpl-3.0 | -3,861,451,368,770,873,300 | 36.16129 | 79 | 0.700521 | false |
dneg/cortex | python/IECoreHoudini/TestCase.py | 12 | 2146 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import unittest
## A class to help implement unit tests for Houdini functionality. It
# implements setUp() to create a new houdini scene to perform the test in.
class TestCase( unittest.TestCase ) :
## Derived classes may override this, but they should call the
# base class implementation too.
def setUp( self ) :
hou.hipFile.clear( True )
| bsd-3-clause | -6,953,863,693,120,572,000 | 45.652174 | 76 | 0.701305 | false |
grdlok/UStar-dl | src/youtube_dl/extractor/ustream.py | 3 | 3623 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
)
class UstreamIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
IE_NAME = 'ustream'
_TEST = {
'url': 'http://www.ustream.tv/recorded/20274954',
'md5': '088f151799e8f572f84eb62f17d73e5c',
'info_dict': {
'id': '20274954',
'ext': 'flv',
'uploader': 'Young Americans for Liberty',
'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('videoID')
# some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
video_id = m.group('videoID')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
video_id = m.group('videoID')
webpage = self._download_webpage(url, video_id)
desktop_video_id = self._html_search_regex(
r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
return self.url_result(desktop_url, 'Ustream')
video_url = 'http://tcdn.ustream.tv/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
webpage, 'title')
uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
webpage, 'uploader', fatal=False, flags=re.DOTALL)
thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': video_title,
'uploader': uploader,
'thumbnail': thumbnail,
}
class UstreamChannelIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
IE_NAME = 'ustream:channel'
_TEST = {
'url': 'http://www.ustream.tv/channel/channeljapan',
'info_dict': {
'id': '10874166',
},
'playlist_mincount': 17,
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = m.group('slug')
webpage = self._download_webpage(url, display_id)
channel_id = self._html_search_meta('ustream:channel_id', webpage)
BASE = 'http://www.ustream.tv'
next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
video_ids = []
while next_url:
reply = self._download_json(
compat_urlparse.urljoin(BASE, next_url), display_id,
note='Downloading video information (next: %d)' % (len(video_ids) + 1))
video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
next_url = reply['nextUrl']
entries = [
self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
for vid in video_ids]
return {
'_type': 'playlist',
'id': channel_id,
'display_id': display_id,
'entries': entries,
}
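# Note (illustrative; not part of youtube-dl): both extractors are reached through the normal
# command-line entry point, e.g.
#
#     youtube-dl http://www.ustream.tv/recorded/20274954
#     youtube-dl http://www.ustream.tv/channel/channeljapan
#
# which dispatch to UstreamIE and UstreamChannelIE respectively via their _VALID_URL patterns.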
| unlicense | 151,072,246,938,129,340 | 34.871287 | 102 | 0.548441 | false |
osuripple/lets | handlers/osuSearchHandler.py | 1 | 1836 | import tornado.gen
import tornado.web
from common.sentry import sentry
from common.web import requestsManager
from common.web import cheesegull
from constants import exceptions
from common.log import logUtils as log
class handler(requestsManager.asyncRequestHandler):
"""
Handler for /web/osu-search.php
"""
MODULE_NAME = "osu_direct_search"
@tornado.web.asynchronous
@tornado.gen.engine
@sentry.captureTornado
def asyncGet(self):
output = ""
try:
try:
# Get arguments
gameMode = self.get_argument("m", None)
if gameMode is not None:
gameMode = int(gameMode)
if gameMode < 0 or gameMode > 3:
gameMode = None
rankedStatus = self.get_argument("r", None)
if rankedStatus is not None:
rankedStatus = int(rankedStatus)
query = self.get_argument("q", "")
page = int(self.get_argument("p", "0"))
if query.lower() in ["newest", "top rated", "most played"]:
query = ""
except ValueError:
raise exceptions.invalidArgumentsException(self.MODULE_NAME)
# Get data from cheesegull API
log.info("Requested osu!direct search: {}".format(query if query != "" else "index"))
searchData = cheesegull.getListing(rankedStatus=cheesegull.directToApiStatus(rankedStatus), page=page * 100, gameMode=gameMode, query=query)
if searchData is None or searchData is None:
raise exceptions.noAPIDataError()
# Write output
output += "999" if len(searchData) == 100 else str(len(searchData))
output += "\n"
for beatmapSet in searchData:
try:
output += cheesegull.toDirect(beatmapSet) + "\r\n"
except ValueError:
# Invalid cheesegull beatmap (empty beatmapset, cheesegull bug? See Sentry #LETS-00-32)
pass
except (exceptions.noAPIDataError, exceptions.invalidArgumentsException):
output = "0\n"
finally:
self.write(output)
| agpl-3.0 | -4,759,483,761,404,035,000 | 30.118644 | 143 | 0.705338 | false |
feschmidt/stcad | testing_scripts/rick_drum.py | 1 | 2218 | import numpy as np
from stcad.source_dev.utilities import *
from stcad.source_dev.objects import *
from stcad.source_dev.chip import *
import gdsCAD as cad
from stcad.source_dev.drums import *
chip = Base_Chip('drum_selection', 1200, 300,label=False)
position = [0,0]
array_separation = 50
drum1 = circ_gap_drum(drum_size=30,tether_width=4,number_of_tethers=10,
release_holes_diameter = 1, release_holes_pitch = 20,release_holes_area_radius = 100)
drum1.translate(position=position)
drum1.add_to_chip(Base_Chip=chip)
position = [drum1._bounding_box[1,0]+array_separation,0]
drum2 = circular_drum2(tether_width=0.5, drum_gap=2, drum_size=5)
drum2.translate(position=position)
drum2.add_to_chip(Base_Chip=chip)
position = [drum2._bounding_box[1,0]+array_separation,0]
drum3 = rounded_base_drum3(tether_width=0.5, drum_gap=1, drum_size=20, corner_rad = 0.5, nr_of_points = 20)
drum3.translate(position=position)
drum3.add_to_chip(Base_Chip=chip)
position = [drum3._bounding_box[1,0]+array_separation,0]
drum4 = rounded_base_drum4(tether_width=0.5, drum_gap=2, drum_size=5,corner_rad = 1, nr_of_points = 20)
drum4.translate(position=position)
drum4.add_to_chip(Base_Chip=chip)
position = [drum4._bounding_box[1,0]+array_separation,0]
drum5 = rounded_base_drum5(tether_width=0.5, drum_gap=2, drum_size=5,corner_rad = 1, nr_of_points = 20)
drum5.translate(position=position)
drum5.add_to_chip(Base_Chip=chip)
position = [drum5._bounding_box[1,0]+array_separation,0]
drum6 = rounded_base_drum4(tether_width=1, drum_gap=5, drum_size=10,corner_rad = 1, nr_of_points = 20)
drum6.translate(position=position)
drum6.add_to_chip(Base_Chip=chip)
position = [drum6._bounding_box[1,0]+array_separation,0]
drum7 = rounded_base_drum5(tether_width=2, drum_gap=15, drum_size=40,corner_rad = 1, nr_of_points = 20)
drum7.translate(position=position)
drum7.add_to_chip(Base_Chip=chip)
position = [drum7._bounding_box[1,0]+array_separation,0]
drum8 = circ_gap_drum(drum_size=20,tether_width=4,number_of_tethers=7)
drum8.translate(position=position)
drum8.add_to_chip(Base_Chip=chip)
# ------------------------------------------- Array End ----------------------------------
chip.save_to_gds(show=False, save=True,loc='') | gpl-3.0 | -2,610,892,884,584,815,600 | 37.929825 | 107 | 0.719116 | false |
eruffaldi/pyoni | src/xndec/xndec.py | 1 | 3965 | import ctypes,os
# XnStatus XnStreamUncompressDepth16ZWithEmbTable(const XnUInt8* pInput, const XnUInt32 nInputSize, XnUInt16* pOutput, XnUInt32* pnOutputSize)
# XnStatus XnStreamUncompressDepth16Z(const XnUInt8* pInput, const XnUInt32 nInputSize, XnUInt16* pOutput, XnUInt32* pnOutputSize)
# XnStatus XnStreamUncompressImage8Z(const XnUInt8* pInput, const XnUInt32 nInputSize, XnUInt8* pOutput, XnUInt32* pnOutputSize)
# XnStatus XnStreamUncompressConf4(const XnUInt8* pInput, const XnUInt32 nInputSize, XnUInt8* pOutput, XnUInt32* pnOutputSize)
types = ["",".so",".dylib",".pyd"]
x = None
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
for a in types:
try:
pa = os.path.join(dir_path,"libxndec" + a)
print "looking for",pa
x = ctypes.CDLL(pa)
if x is not None:
print "found pa",pa
break
except:
pass
if x is None:
    raise Exception("could not load the libxndec shared library")
f = x.XnStreamUncompressDepth16ZWithEmbTable
et = ctypes.POINTER(ctypes.c_uint8)
f.argtypes = [ctypes.c_char_p,ctypes.c_int,ctypes.POINTER(ctypes.c_uint16),ctypes.POINTER(ctypes.c_int)]
f.restype = ctypes.c_int
XnStreamUncompressDepth16ZWithEmbTable = f
f = x.XnStreamUncompressDepth16Z
et = ctypes.POINTER(ctypes.c_uint8)
f.argtypes = [ctypes.c_char_p,ctypes.c_int,ctypes.POINTER(ctypes.c_uint16),ctypes.POINTER(ctypes.c_int)]
f.restype = ctypes.c_int
XnStreamUncompressDepth16Z = f
f = x.XnStreamUncompressImage8Z
et = ctypes.POINTER(ctypes.c_uint8)
f.argtypes = [ctypes.c_char_p,ctypes.c_int,ctypes.POINTER(ctypes.c_uint8),ctypes.POINTER(ctypes.c_int)]
f.restype = ctypes.c_int
XnStreamUncompressImage8Z = f
f = x.XnStreamUncompressConf4
et = ctypes.POINTER(ctypes.c_uint8)
f.argtypes = [ctypes.c_char_p,ctypes.c_int,ctypes.POINTER(ctypes.c_uint8),ctypes.POINTER(ctypes.c_int)]
f.restype = ctypes.c_int
XnStreamUncompressConf4 = f
#XnStatus XnStreamCompressDepth16ZWithEmbTable(const XnUInt16* pInput, const XnUInt32 nInputSize, XnUInt8* pOutput, XnUInt32* pnOutputSize, XnUInt16 nMaxValue)
f = x.XnStreamCompressDepth16ZWithEmbTable
et = ctypes.POINTER(ctypes.c_uint8)
f.argtypes = [ctypes.POINTER(ctypes.c_uint16),ctypes.c_int,ctypes.POINTER(ctypes.c_uint8),ctypes.POINTER(ctypes.c_int),ctypes.c_int]
f.restype = ctypes.c_int
XnStreamCompressDepth16ZWithEmbTable = f
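# Python-side helpers: allocoutput16/allocoutput8 create writable ctypes buffers of
# n 16-bit or 8-bit values, and each do* wrapper below passes the buffer size to the
# native call and returns a (status, reported_output_size) tuple.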
def allocoutput16(n):
#pt = ctypes.c_uint16*n
#p = pt()
#return p
return ctypes.create_string_buffer(n*2)
def allocoutput8(n):
#pt = ctypes.c_uint16*n
#p = pt()
#return p
return ctypes.create_string_buffer(n)
def doXnStreamUncompressConf4(input,outputbuffer):
r = ctypes.c_int(len(outputbuffer));
rr = XnStreamUncompressConf4(ctypes.c_char_p(input),len(input),ctypes.cast(outputbuffer,ctypes.POINTER(ctypes.c_uint8)),ctypes.byref(r))
return (rr,r.value)
def doXnStreamUncompressImage8Z(input,outputbuffer):
r = ctypes.c_int(len(outputbuffer));
rr = XnStreamUncompressImage8Z(ctypes.c_char_p(input),len(input),ctypes.cast(outputbuffer,ctypes.POINTER(ctypes.c_uint8)),ctypes.byref(r))
return (rr,r.value)
def doXnStreamUncompressDepth16ZWithEmbTable(input,outputbuffer):
r = ctypes.c_int(len(outputbuffer));
rr = XnStreamUncompressDepth16ZWithEmbTable(ctypes.c_char_p(input),len(input),ctypes.cast(outputbuffer,ctypes.POINTER(ctypes.c_uint16)),ctypes.byref(r))
return (rr,r.value)
def doXnStreamCompressDepth16ZWithEmbTable(input,outputbuffer,maxvalue):
r = ctypes.c_int(len(outputbuffer));
rr = XnStreamCompressDepth16ZWithEmbTable(ctypes.cast(input,ctypes.POINTER(ctypes.c_uint16)),len(input),ctypes.cast(outputbuffer,ctypes.POINTER(ctypes.c_uint8)),ctypes.byref(r),maxvalue)
return (rr,r.value)
def doXnStreamUncompressDepth16Z(input,outputbuffer):
r = ctypes.c_int(len(outputbuffer));
rr = XnStreamUncompressDepth16Z(ctypes.c_char_p(input),len(input),ctypes.cast(outputbuffer,ctypes.POINTER(ctypes.c_uint16)),ctypes.byref(r))
return (rr,r.value)
if __name__ == "__main__":
a = "1234"
b = allocoutput16(123)
print doXnStreamUncompressDepth16ZWithEmbTable(a,b) | mit | -2,357,825,303,375,275,500 | 38.66 | 187 | 0.775032 | false |
pierreg/tensorflow | tensorflow/contrib/training/__init__.py | 4 | 3073 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training and input utilities.
## Splitting sequence inputs into minibatches with state saving
Use [`SequenceQueueingStateSaver`](#SequenceQueueingStateSaver) or
its wrapper [`batch_sequences_with_states`](#batch_sequences_with_states) if
you have input data with a dynamic primary time / frame count axis which
you'd like to convert into fixed size segments during minibatching, and would
like to store state in the forward direction across segments of an example.
@@batch_sequences_with_states
@@NextQueuedSequenceBatch
@@SequenceQueueingStateSaver
## Online data resampling
To resample data with replacement on a per-example basis, use
['rejection_sample'](#rejection_sample) or
['resample_at_rate'](#resample_at_rate). For `rejection_sample`, provide
a boolean Tensor describing whether to accept or reject. For `resample_at_rate`,
provide the desired rate for each example. If you wish to specify relative
rates, rather than absolute ones, use ['weighted_resample'](#weighted_resample)
(which also returns the actual resampling rate used for each output example).
Use ['stratified_sample'](#stratified_sample) to resample without replacement
from the data to achieve a desired mix of class proportions that the TensorFlow
graph sees. For instance, if you have a binary classification dataset that is
99.9% class 1, a common approach is to resample from the data so that the data
is more balanced.
@@rejection_sample
@@resample_at_rate
@@stratified_sample
@@weighted_resample
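As a rough sketch only (the tensor names are placeholders and the keyword
arguments are abbreviated, so check `stratified_sample` for the exact
signature), rebalancing a heavily skewed binary stream might look like:
  [data_batch], label_batch = stratified_sample(
      tensors=[data], labels=label, target_probs=[0.5, 0.5], batch_size=32)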
## Bucketing
Use ['bucket'](#bucket) or
['bucket_by_sequence_length'](#bucket_by_sequence_length) to stratify
minibatches into groups ("buckets"). Use `bucket_by_sequence_length`
with the argument `dynamic_pad=True` to receive minibatches of similarly
sized sequences for efficient training via `dynamic_rnn`.
@@bucket
@@bucket_by_sequence_length
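A sketch of the bucketing flow (tensor names are placeholders and the argument
list is abbreviated; see `bucket_by_sequence_length` for the full signature):
  lengths, batched = bucket_by_sequence_length(
      input_length=tf.shape(sequence)[0],
      tensors=[sequence, label],
      batch_size=32,
      bucket_boundaries=[10, 20, 40],
      dynamic_pad=True)
  # `batched` holds padded minibatches of similarly sized sequences; pass
  # `sequence_length=lengths` to `dynamic_rnn` when consuming them.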
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.training.python.training.bucket_ops import *
from tensorflow.contrib.training.python.training.resample import *
from tensorflow.contrib.training.python.training.sampling_ops import *
from tensorflow.contrib.training.python.training.sequence_queueing_state_saver import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
| apache-2.0 | -1,635,632,978,905,022,000 | 40.527027 | 87 | 0.764725 | false |
Sylrob434/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/hls.py | 8 | 1527 | import os
import subprocess
from .common import FileDownloader
from ..utils import (
encodeFilename,
)
class HlsFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
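        # Hand the m3u8 URL to ffmpeg/avconv: copy the streams into an MP4 container,
        # with aac_adtstoasc rewriting the ADTS AAC bitstream so it can be muxed.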
args = [
'-y', '-i', url, '-f', 'mp4', '-c', 'copy',
'-bsf:a', 'aac_adtstoasc',
encodeFilename(tmpfilename, for_subprocess=True)]
for program in ['avconv', 'ffmpeg']:
try:
subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
break
except (OSError, IOError):
pass
else:
self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
cmd = [program] + args
retval = subprocess.call(cmd)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
else:
self.to_stderr(u"\n")
self.report_error(u'ffmpeg exited with code %d' % retval)
return False
| gpl-3.0 | -1,986,815,887,468,632,000 | 32.195652 | 117 | 0.53962 | false |
Kupoman/Fafnir | fafnir/resolve_intersections_pass.py | 1 | 4158 | import os
from OpenGL import GL as gl
import panda3d.core as p3d
from panda3d_render_pass import RenderPass
SHADER_DIR = os.path.join(os.path.dirname(__file__), 'shaders', '')
class ResolveIntersectionsPass(RenderPass):
def __init__(
self,
name,
graphics_context,
texture_intersections,
buffer_meshes,
buffer_materials,
material_records,
depth_filter_pass,
):
fb_props = p3d.FrameBufferProperties()
fb_props.set_rgb_color(True)
fb_props.set_rgba_bits(8, 8, 8, 0)
fb_props.set_depth_bits(24)
self.material_records = material_records
self._fsq = self._make_fullscreen_quad()
scene = self._make_draw_path()
super().__init__(
name,
**graphics_context,
scene=scene,
frame_buffer_properties=fb_props,
clear_color=p3d.LColor(0.0, 0.0, 0.0, 0.0),
shader=p3d.Shader.load(
p3d.Shader.SL_GLSL,
SHADER_DIR + 'resolve_intersections.vert',
SHADER_DIR + 'resolve_intersections.frag'
),
share_depth_with=depth_filter_pass
)
self._root.set_shader_input('texture_intersections', texture_intersections)
self._root.set_shader_input('buffer_meshes', buffer_meshes.get_texture())
self._root.set_shader_input('buffer_materials', buffer_materials.get_texture())
self._root.set_shader_input('instance_id', 0)
self._root.set_depth_test(False)
self._init_clip_control()
def _make_fullscreen_quad(self):
tris = p3d.GeomTristrips(p3d.GeomEnums.UH_static)
tris.add_next_vertices(4)
vdata = p3d.GeomVertexData(
'abc',
p3d.GeomVertexFormat.get_empty(),
p3d.GeomEnums.UH_static
)
geom = p3d.Geom(vdata)
geom.add_primitive(tris)
geom.set_bounds(p3d.OmniBoundingVolume())
node = p3d.GeomNode('Resolve Pass FSQ')
node.add_geom(geom)
return p3d.NodePath(node)
def _make_draw_path(self):
cb_node = p3d.CallbackNode('Intersection Draw Callback')
cb_node_path = p3d.NodePath(cb_node)
cb_node_path.set_shader_input('instance_id', 1)
cb_node_path.set_attrib(p3d.DepthTestAttrib.make(p3d.RenderAttrib.MEqual))
def cull_callback(callback_data):
for instance in cb_node_path.children:
instance.detach_node()
for i, record in enumerate(self.material_records):
placeholder = cb_node_path.attach_new_node('placeholder')
placeholder.set_texture(record.texture)
placeholder.set_shader_input('instance_id', i)
instance = self._fsq.instance_to(placeholder)
callback_data.upcall()
cb_node.cull_callback = p3d.PythonCallbackObject(cull_callback)
return cb_node_path
def _init_clip_control(self):
def attach_new_callback(nodepath, name, callback):
cb_node = p3d.CallbackNode(name)
cb_node.draw_callback = p3d.PythonCallbackObject(callback)
cb_node_path = nodepath.attach_new_node(cb_node)
return cb_node_path
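        # The begin/end callbacks flip clip-space depth to the [0, 1] convention for
        # this pass and restore the default [-1, 1] range afterwards.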
def begin(callback_data):
gl.glClipControl(gl.GL_LOWER_LEFT, gl.GL_ZERO_TO_ONE)
callback_data.upcall()
def end(callback_data):
gl.glClipControl(gl.GL_LOWER_LEFT, gl.GL_NEGATIVE_ONE_TO_ONE)
callback_data.upcall()
bin_manager = p3d.CullBinManager.get_global_ptr()
bin_manager.add_bin('clip_control_begin', p3d.CullBinManager.BT_fixed, 5)
bin_manager.add_bin('clip_control_end', p3d.CullBinManager.BT_fixed, 55)
path = attach_new_callback(
self._root,
self.name + '_clip_control_begin',
begin
)
path.set_bin('clip_control_begin', 11)
path = attach_new_callback(
self._root,
self.name + '_clip_control_end',
end
)
path.set_bin('clip_control_end', 10)
| apache-2.0 | -4,998,169,536,203,676,000 | 32.532258 | 87 | 0.587061 | false |
bertucho/epic-movie-quotes-quiz | dialogos/build/Twisted/twisted/internet/test/test_protocol.py | 8 | 17096 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.protocol}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyObject
from zope.interface import implementer
from twisted.python.failure import Failure
from twisted.internet.interfaces import (
IProtocol, ILoggingContext, IProtocolFactory, IConsumer)
from twisted.internet.defer import CancelledError
from twisted.internet.protocol import (
Protocol, ClientCreator, Factory, ProtocolToConsumerAdapter,
ConsumerToProtocolAdapter)
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import MemoryReactorClock, StringTransport
from twisted.logger import LogLevel, globalLogPublisher
class ClientCreatorTests(TestCase):
"""
Tests for L{twisted.internet.protocol.ClientCreator}.
"""
def _basicConnectTest(self, check):
"""
Helper for implementing a test to verify that one of the I{connect}
methods of L{ClientCreator} passes the right arguments to the right
reactor method.
@param check: A function which will be invoked with a reactor and a
L{ClientCreator} instance and which should call one of the
L{ClientCreator}'s I{connect} methods and assert that all of its
arguments except for the factory are passed on as expected to the
reactor. The factory should be returned.
"""
class SomeProtocol(Protocol):
pass
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, SomeProtocol)
factory = check(reactor, cc)
protocol = factory.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
def test_connectTCP(self):
"""
L{ClientCreator.connectTCP} calls C{reactor.connectTCP} with the host
and port information passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectTCP('example.com', 1234, 4321, ('1.2.3.4', 9876))
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('1.2.3.4', 9876))
return factory
self._basicConnectTest(check)
def test_connectUNIX(self):
"""
L{ClientCreator.connectUNIX} calls C{reactor.connectUNIX} with the
filename passed to it, and with a factory which will construct the
protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectUNIX('/foo/bar', 123, True)
address, factory, timeout, checkPID = reactor.unixClients.pop()
self.assertEqual(address, '/foo/bar')
self.assertEqual(timeout, 123)
self.assertEqual(checkPID, True)
return factory
self._basicConnectTest(check)
def test_connectSSL(self):
"""
L{ClientCreator.connectSSL} calls C{reactor.connectSSL} with the host,
port, and context factory passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
expectedContextFactory = object()
cc.connectSSL('example.com', 1234, expectedContextFactory, 4321, ('4.3.2.1', 5678))
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertIs(contextFactory, expectedContextFactory)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('4.3.2.1', 5678))
return factory
self._basicConnectTest(check)
def _cancelConnectTest(self, connect):
"""
Helper for implementing a test to verify that cancellation of the
L{Deferred} returned by one of L{ClientCreator}'s I{connect} methods is
implemented to cancel the underlying connector.
@param connect: A function which will be invoked with a L{ClientCreator}
instance as an argument and which should call one its I{connect}
methods and return the result.
@return: A L{Deferred} which fires when the test is complete or fails if
there is a problem.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(cc)
connector = reactor.connectors.pop()
self.assertFalse(connector._disconnected)
d.cancel()
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCP(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectTCP('example.com', 1234)
return self._cancelConnectTest(connect)
def test_cancelConnectUNIX(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectUNIX('/foo/bar')
return self._cancelConnectTest(connect)
def test_cancelConnectSSL(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectSSL('example.com', 1234, object())
return self._cancelConnectTest(connect)
def _cancelConnectTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection is set up but before it is fired with the
resulting protocol instance.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(reactor, cc)
connector = reactor.connectors.pop()
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, disconnecting the transport just set up and
# cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
# A real connector implementation is responsible for disconnecting the
# transport as well. For our purposes, just check that someone told the
# connector to disconnect.
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPTimeout(self):
"""
L{ClientCreator.connectTCP} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectUNIXTimeout(self):
"""
L{ClientCreator.connectUNIX} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectSSLTimeout(self):
"""
L{ClientCreator.connectSSL} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def _cancelConnectFailedTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection attempt has failed but before it is fired
with the resulting failure.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d, factory = connect(reactor, cc)
connector = reactor.connectors.pop()
factory.clientConnectionFailed(
connector, Failure(Exception("Simulated failure")))
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPFailedTimeout(self):
"""
Similar to L{test_cancelConnectTCPTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectUNIXFailedTimeout(self):
"""
Similar to L{test_cancelConnectUNIXTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectSSLFailedTimeout(self):
"""
Similar to L{test_cancelConnectSSLTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
class ProtocolTests(TestCase):
"""
Tests for L{twisted.internet.protocol.Protocol}.
"""
def test_interfaces(self):
"""
L{Protocol} instances provide L{IProtocol} and L{ILoggingContext}.
"""
proto = Protocol()
self.assertTrue(verifyObject(IProtocol, proto))
self.assertTrue(verifyObject(ILoggingContext, proto))
def test_logPrefix(self):
"""
L{Protocol.logPrefix} returns the protocol class's name.
"""
class SomeThing(Protocol):
pass
self.assertEqual("SomeThing", SomeThing().logPrefix())
def test_makeConnection(self):
"""
L{Protocol.makeConnection} sets the given transport on itself, and
then calls C{connectionMade}.
"""
result = []
class SomeProtocol(Protocol):
def connectionMade(self):
result.append(self.transport)
transport = object()
protocol = SomeProtocol()
protocol.makeConnection(transport)
self.assertEqual(result, [transport])
class FactoryTests(TestCase):
"""
Tests for L{protocol.Factory}.
"""
def test_interfaces(self):
"""
L{Factory} instances provide both L{IProtocolFactory} and
L{ILoggingContext}.
"""
factory = Factory()
self.assertTrue(verifyObject(IProtocolFactory, factory))
self.assertTrue(verifyObject(ILoggingContext, factory))
def test_logPrefix(self):
"""
L{Factory.logPrefix} returns the name of the factory class.
"""
class SomeKindOfFactory(Factory):
pass
self.assertEqual("SomeKindOfFactory", SomeKindOfFactory().logPrefix())
def test_defaultBuildProtocol(self):
"""
L{Factory.buildProtocol} by default constructs a protocol by calling
its C{protocol} attribute, and attaches the factory to the result.
"""
class SomeProtocol(Protocol):
pass
f = Factory()
f.protocol = SomeProtocol
protocol = f.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
self.assertIs(protocol.factory, f)
def test_forProtocol(self):
"""
L{Factory.forProtocol} constructs a Factory, passing along any
additional arguments, and sets its C{protocol} attribute to the given
Protocol subclass.
"""
class ArgTakingFactory(Factory):
def __init__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
factory = ArgTakingFactory.forProtocol(Protocol, 1, 2, foo=12)
self.assertEqual(factory.protocol, Protocol)
self.assertEqual(factory.args, (1, 2))
self.assertEqual(factory.kwargs, {"foo": 12})
def test_doStartLoggingStatement(self):
"""
L{Factory.doStart} logs that it is starting a factory, followed by
the L{repr} of the L{Factory} instance that is being started.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
f = Factory()
f.doStart()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Starting factory {factory!r}')
def test_doStopLoggingStatement(self):
"""
L{Factory.doStop} logs that it is stopping a factory, followed by
the L{repr} of the L{Factory} instance that is being stopped.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
class MyFactory(Factory):
numPorts = 1
f = MyFactory()
f.doStop()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Stopping factory {factory!r}')
class AdapterTests(TestCase):
"""
Tests for L{ProtocolToConsumerAdapter} and L{ConsumerToProtocolAdapter}.
"""
def test_protocolToConsumer(self):
"""
L{IProtocol} providers can be adapted to L{IConsumer} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
p = Protocol()
p.dataReceived = result.append
consumer = IConsumer(p)
consumer.write(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(consumer, ProtocolToConsumerAdapter)
def test_consumerToProtocol(self):
"""
L{IConsumer} providers can be adapted to L{IProtocol} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
@implementer(IConsumer)
class Consumer(object):
def write(self, d):
result.append(d)
c = Consumer()
protocol = IProtocol(c)
protocol.dataReceived(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(protocol, ConsumerToProtocolAdapter)
| mit | 8,421,303,479,318,667,000 | 35.220339 | 96 | 0.637401 | false |
habibmasuro/moneta-1.0.1.0 | share/qt/extract_strings_qt.py | 5 | 1841 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/monetastrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
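    For example, the input
        msgid "Hello"
        msgstr ""
    yields [(['"Hello"'], ['""'])]; continuation lines starting with '"' are
    appended to the current msgid or msgstr list.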
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *moneta_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("moneta-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit | -8,046,745,637,854,879,000 | 24.569444 | 80 | 0.570885 | false |
oktayacikalin/pyglet | contrib/layout/tests/layout/base_layout.py | 29 | 1069 | #!/usr/bin/env python
'''Base class for layout tests.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
import sys
from pyglet.gl import *
from layout import *
from pyglet.window import *
from pyglet.window.event import *
class LayoutTestBase(unittest.TestCase):
# Supply either XHTML or HTML.
xhtml = None
html = None
def on_expose(self):
glClearColor(1, 1, 1, 1)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
self.layout.draw()
self.window.flip()
def test_main(self):
width, height = 800, 600
self.window = w = Window(width, height, visible=False)
w.push_handlers(self)
self.layout = Layout()
w.push_handlers(self.layout)
if self.xhtml:
self.layout.set_xhtml(self.xhtml)
else:
self.layout.set_html(self.html)
w.set_visible()
while not w.has_exit:
w.dispatch_events()
self.on_expose()
w.close()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,601,160,540,436,226,300 | 21.270833 | 62 | 0.585594 | false |
mindbody/API-Examples | SDKs/Python/test/test_get_client_referral_types_response.py | 1 | 1072 | # coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_client_referral_types_response import GetClientReferralTypesResponse # noqa: E501
from swagger_client.rest import ApiException
class TestGetClientReferralTypesResponse(unittest.TestCase):
"""GetClientReferralTypesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetClientReferralTypesResponse(self):
"""Test GetClientReferralTypesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_client_referral_types_response.GetClientReferralTypesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -3,727,456,310,154,148,000 | 25.8 | 121 | 0.723881 | false |
batxes/4Cin | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres42525.py | 4 | 88239 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
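# Each block below fetches or creates a named marker set and places a single
# spherical marker at the given (x, y, z) coordinate with an RGB colour and radius.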
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-941.109, 2302.59, 3256.86), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-729.188, 2294.6, 3531.14), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((-408.81, 2445.68, 3366.05), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-669.754, 2639.8, 3624.16), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-858.685, 2885.2, 3970.77), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((-293.001, 2788.11, 3887.9), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((214.015, 2775.43, 3859.58), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((-99.4167, 3166.84, 4002.67), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((-378.561, 3575.8, 4213.55), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((-6.50062, 3395.52, 4161.4), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((277.021, 3029.54, 4063.34), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((580.314, 2539.09, 3894.17), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((593.941, 2173.03, 4067.36), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((361.721, 2401.96, 4081.61), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((264.295, 2181.18, 4365.26), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((47.4003, 1770.91, 4558.1), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((-161.526, 1240.61, 4698.31), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((-146.925, 727.373, 4645.28), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((-252.404, 197.739, 4507.66), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((-702.076, -328.487, 4479.93), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((-143.781, -88.9417, 4252.93), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((306.855, -65.4159, 4412.73), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((710.075, -261.613, 4581.17), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((1002.73, -335.081, 4358.23), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((1041.8, -462.017, 4050.84), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((898.059, -421.996, 3767.54), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((918.649, -26.3429, 3912.26), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((938.588, 512.652, 3663.82), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((1041.38, 1010.29, 3423), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((1060.34, 1462.39, 3494.74), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((1222.04, 1708.63, 3594.43), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1182.29, 1405.03, 3672.36), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((970.807, 1359.96, 3462.36), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((779.198, 1314.23, 3269.07), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((790.583, 1184.31, 3111.7), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((572.663, 913.216, 3010.09), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((1282.42, 590.582, 2249.04), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1933.04, 792.812, 1727.43), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2143.14, 1304.84, 1263.36), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((2069.59, 2026.21, 1317.35), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2041.54, 2132.61, 2067.11), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((1815.69, 1899.79, 2811.15), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((1421.56, 1695.08, 2993.07), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((1329.96, 1871.41, 2752.82), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((1370.02, 2466.34, 2656.58), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((1240.55, 2744.91, 2291.59), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((979.405, 2096.51, 2317.04), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((904.187, 2588.16, 1815.99), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((780.514, 2887.32, 1161.34), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((1196.14, 2576.09, 1281.44), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((1435.87, 2310.44, 1783.76), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((1606.13, 2271.66, 1923.93), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((1673.86, 2579.6, 1373.71), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((1753.5, 2883.74, 974.618), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((1897.94, 3200.24, 698.727), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2149.21, 3282.92, 1290.86), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((2226.93, 2950.2, 1669.83), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2208.12, 2519.07, 2047.16), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2048.46, 2133.4, 2364.74), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((1884.57, 1983.98, 2153.45), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((1833.71, 1941.27, 2045.21), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2119.55, 2168.34, 1788.49), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((2638.22, 2489.19, 1263.59), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((3218.91, 2839.74, 555.961), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((2920.42, 3052.36, 1100.83), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((2572.94, 3074.74, 1405.16), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((2457.44, 2796.01, 1375.33), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((2249.21, 2612.83, 1547.4), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((1900.36, 2455.73, 1552.47), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((1462.24, 2312.22, 1434.18), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((1087.97, 2072.09, 1130.88), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((1189.15, 1946.82, 759.803), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((1848.02, 2274.56, 591.821), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((2524.61, 2752.48, 513.496), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((2986.72, 3250.02, 628.4), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((3140.07, 3557.66, 882.644), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((3413.92, 3217.39, 665.569), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((3215.19, 2939.93, 359.043), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((3243.15, 3013.53, 180.952), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((3535.93, 2996.81, -20.3374), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((3905.34, 2650.24, 522.813), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((3705.49, 2398.89, 1171.52), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((3287.9, 2104.07, 1483.03), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((3205.79, 1816.39, 1565.19), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((3858.79, 2037.02, 1017.02), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((3563.08, 2507.48, 999.123), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((3089.5, 2807.92, 1225.22), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((2823.21, 3116.18, 1437.48), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((3183.29, 3516.64, 1241.14), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((2997.09, 3333.06, 962.534), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((2692.24, 3080.79, 864.528), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((2616.47, 2957.85, 1165.11), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((2668.85, 3204.96, 1426.84), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((2527.45, 3540.12, 1660.79), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((2229.38, 4054.12, 1496.62), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((1977.19, 4263.59, 1058.35), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((1997.29, 3675.21, 837.188), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((2241.59, 3282.84, 1034.23), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((2425.96, 3513.89, 1201.86), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((2535.8, 3823.23, 775.878), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((2382.31, 2906.89, 1115.03), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2270.64, 2033.2, 1682.12), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2376.51, 1912.88, 2190.6), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((2646.42, 2106.48, 1958.54), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((2725.45, 2382.51, 1695.58), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((2679.82, 2694.74, 1439.69), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((2431.71, 2990.32, 1264.05), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((2363.6, 3344.46, 1396.31), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((2330.94, 3638.51, 1544.12), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((2477.64, 3579.45, 1770.7), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((2482.55, 3610.75, 1963.09), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((2684.95, 3837.42, 2065.65), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((2688.44, 3549.44, 2340.68), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2714.34, 3228.06, 2611.77), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2669.06, 2857.94, 2455.95), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((2868.13, 2516.41, 2105.02), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((3034.75, 2064.45, 1829.4), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((3619, 1972.42, 1382.48), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((3947.29, 2028.18, 925.01), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((3761.67, 2228.88, 1374.01), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((3459.89, 2383.45, 1578.7), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((3086.44, 2654.63, 1666.6), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((2923.1, 3083.31, 1363.79), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((2630.03, 3400.37, 1456.08), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((2439.87, 3738.94, 1568.63), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((2197.33, 4007.93, 1875.69), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((1977.22, 4548.72, 1876.12), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((1726.3, 4986.7, 1646.56), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((1892.58, 4634.95, 1278), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((2074.23, 3996.14, 1046.8), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((2473.76, 3745.91, 1016.34), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((2775.23, 3419.21, 1181.79), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((3070, 3236.08, 1444.97), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((3499.91, 3262.91, 1498.35), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((3674.62, 3070.76, 1775.37), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((3682.63, 2735.76, 1860.62), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((3260.49, 2493.99, 1854.79), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2839.83, 2237.81, 2053.82), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2511.97, 2188.47, 2407.53), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2403.4, 2133.41, 2204.45), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2628.51, 1992.52, 1901.14), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((2856.13, 2098.08, 1696.85), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((3131.56, 2329.51, 1704.03), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((3390.92, 2503.07, 1664.83), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((3645.15, 2642.4, 1493.88), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((3335.63, 2446.31, 1635.19), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((3059.77, 2492.39, 1810.63), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((2881.19, 2743.62, 1781.33), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((2517.42, 2644.46, 2017.97), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2239.04, 2686.54, 2242.25), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2495.03, 2823.71, 2438.98), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((2841.84, 3143.77, 2512.62), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((3095.92, 3405.25, 2573.21), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((3142.93, 3069.09, 2507.9), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2753.11, 2649.65, 2526.84), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2431.77, 2317.65, 2451.94), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2212.77, 2450.69, 2219.08), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((2407.76, 2725.12, 1979.31), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((2742.72, 2849.96, 1622.83), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((2851.09, 2420.87, 1526.48), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((2792.63, 1938.23, 1764.59), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((3042.04, 1915.27, 2097.54), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((3532.42, 2228.55, 1818.75), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((4178.17, 2650.51, 1357.37), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((4304.92, 2884.27, 1571.47), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((4059.75, 2999.03, 1802.34), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((4189.57, 2894.64, 2067.97), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((3638.66, 2647.09, 2120.65), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((3063.95, 2387.89, 2168.57), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((2801.34, 2184.25, 2020.01), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((3218.87, 2300.34, 1800), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((3648.4, 2422.17, 1573.77), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((3561.3, 2225.97, 1432.61), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((3581.76, 2067.48, 1676.9), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((3862.96, 2101.76, 1849.93), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((3970.05, 2302.16, 1599.09), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((3726.87, 2436.65, 1165.11), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((3406.82, 2672.94, 703.957), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((2906.56, 2466.02, 589.869), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((2628.5, 2189.76, 873.176), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((2540.91, 2026.2, 1452.45), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((2484.43, 2010.37, 1903.14), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((2744.56, 2291.44, 1726.23), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((3178.47, 2520.03, 1309.27), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((3489.08, 2690.39, 1439.14), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((3442.88, 2351.15, 1496.08), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((3466.1, 2082.76, 1314.59), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((3642.24, 1910.25, 979.854), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((3680.4, 2187.12, 1554.46), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((3549.82, 2606.81, 1819.58), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((3420.36, 3003.42, 1832.88), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((3286.25, 3498.56, 2119.35), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((2865.64, 3461.13, 2540.11), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((2547.67, 3744.3, 2692.25), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((2429.63, 4251.63, 2669.34), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((2519.87, 5179.4, 2534.04), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((2476.11, 5048.63, 2003.98), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((2527.75, 4342.32, 1676.6), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((2309.42, 3397.98, 1712.23), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((2286.32, 2750.35, 1875.06), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((2655.15, 2624.23, 1683.08), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((3102.59, 2681.02, 1647.33), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((3505.93, 2755.78, 1808.21), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((3406.32, 2764.53, 2162.98), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((3270.52, 2967.5, 1993.88), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((3406.01, 3018.65, 1945.28), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2880.67, 2617.26, 2220.14), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2404.33, 2456.66, 2195.04), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((2593.21, 3066.83, 1608.36), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((3217, 3818.82, 1267.42), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((3700.73, 3409.89, 1387.84), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((3957.32, 2784.52, 1240.48), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((3622.07, 2157.13, 919.179), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((3738.48, 1551.55, 943.677), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((4163, 1151.12, 1228.2), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((4442.96, 1316.63, 1521.93), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((3870.06, 1272.89, 1713.83), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((3485.75, 1037.32, 1663.21), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((3117.49, 941.409, 1751.78), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((2961.49, 1020.34, 1466.98), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((3278.21, 1315.7, 1075.79), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((3580.23, 1798.7, 1043.6), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((3229.02, 2047.94, 1138.97), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((2689.37, 1828.64, 1588.26), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((2525.19, 1682.91, 1714.61), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((2775.75, 1822.86, 1675.36), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((2775.26, 2190.18, 1477.79), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((2978.14, 2355.45, 1860.08), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2931.75, 2284.99, 2331.55), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2983.77, 2338.87, 2817.22), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((3330.23, 2609.28, 2949.06), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((3573.75, 2800.84, 2928.55), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((3539.71, 2458.57, 3003.05), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((3460.56, 2141.32, 3036.16), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((3152.36, 2343.8, 2764.96), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((3211.49, 2520.18, 2343.44), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((3656.85, 2672.07, 1915.99), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((4020.22, 2524.11, 1530.62), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((4442.81, 2466.09, 1605.15), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((4138.2, 2805.28, 1732.57), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((3901.26, 2587.03, 2378.66), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((3508.93, 2516.82, 2715.54), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((3649.34, 2781.62, 2688.71), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((3271.3, 2693.96, 2575.55), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((3172.7, 2337.86, 2667.71), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((3225.68, 2020.69, 2666.86), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((3001.46, 1935.5, 2722.97), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((2745.21, 2234.66, 2989.76), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((3037.32, 2412.3, 2870.52), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((3436.15, 2366.11, 2714.4), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((3480.11, 2011.1, 2625.09), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((3252.11, 1718.35, 2489.5), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((3173.7, 1377.26, 2216.42), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((3224.51, 1113.06, 1904.49), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((3354.21, 1133.76, 2175.9), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((3043.67, 1431.26, 2261.6), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((2849.52, 1732.82, 2515.83), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((2644.81, 1752.22, 2488.99), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((2573.5, 1838.26, 2450.41), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((2892.99, 1950.55, 2127.98), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((3001.04, 2133.68, 1958.25), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2784.79, 2202.24, 2341.91), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2414.65, 2097.95, 2794.64), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((2054.93, 2075.93, 3149.75), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((1789.36, 1977.85, 2940.39), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((1901.79, 1975.19, 2666.21), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((1840.97, 1893.34, 2890.58), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((1836.35, 1809.08, 2892.38), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((2143.89, 1675.04, 2861.29), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((2686.28, 1595.07, 3006.65), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((2562.61, 1371.04, 2996.24), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((2186.93, 1265.77, 3064.08), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((3032.45, 1368.05, 2985.76), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((4005.95, 1347.53, 2819.48), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((3994.36, 1548.37, 2558.57), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((3746.51, 1876.52, 2551.95), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((3561.73, 2132.04, 2588.36), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((2938.73, 2078.25, 2663.93), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((2351.2, 1939.29, 2778.2), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((2344.54, 1940.66, 2831.99), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((2713.54, 2209.72, 2928.05), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((2357.75, 2317.49, 3410.87), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((2060.84, 2051.96, 3835.51), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((2035.08, 1980.96, 4021.74), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((2124.92, 2112.39, 4112.61), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((2571.09, 1967.99, 4429.46), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((2589.59, 1991.84, 5005.12), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((2241.57, 2204.76, 5018.63), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((1882.26, 2474.79, 4761.9), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((1780.51, 3070.89, 4957.2), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((1845.67, 3737.35, 5448.61), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((2304.83, 3786.04, 5448.18), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((2376.09, 2540.16, 4776.6), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((1687.3, 3390.28, 5015.79), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((1289.78, 3626.69, 5113.47), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((1468.3, 3410.46, 5241.54), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((1670.38, 3397.94, 5626.47), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((1900.19, 2930.11, 5268.16), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((1886.36, 2401.55, 4640.81), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((2115.04, 2180.68, 4350.05), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((1987.22, 2030.16, 4487.36), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((2280.72, 1860.08, 4733.1), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((2651.99, 1872.15, 4798.48), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((3004.11, 2016.77, 4781.42), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((3009.48, 1662.13, 4979.2), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
s=new_marker_set('particle_305 geometry')
marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((3096.77, 1543.13, 5357.1), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
s=new_marker_set('particle_306 geometry')
marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((2957.52, 1589.46, 5566), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
s=new_marker_set('particle_307 geometry')
marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((2641.05, 1630.77, 5057.98), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
s=new_marker_set('particle_308 geometry')
marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((2352.81, 1861.66, 4547.23), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
s=new_marker_set('particle_309 geometry')
marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((2400.95, 2183.41, 4158.16), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
s=new_marker_set('particle_310 geometry')
marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((2191.26, 2122.28, 4044.64), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
s=new_marker_set('particle_311 geometry')
marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((2236.91, 2124.54, 4054.03), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((2143.42, 1927.81, 4293.34), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((2242.8, 1384.12, 4551.92), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((1843.61, 1469.21, 4741.25), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((1983.87, 1589.25, 4496.04), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((2337.94, 1610.65, 4257.53), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((3008.52, 1815.73, 4299.12), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((3449.78, 2418.83, 4497.11), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((3257.68, 2932.66, 4356.65), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((2942.34, 3220.85, 4090.31), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((2679.78, 3593.15, 3983.82), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((2439.46, 3934.22, 4045.62), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((2279.85, 4367.78, 4294.08), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((2550.79, 3813.58, 4383.17), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((2670.61, 3348.26, 4258.51), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((3035.17, 3522.31, 4018.36), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((3480.95, 3657.31, 4001.22), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((3140.3, 3649.62, 3676.84), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((2797.65, 3636.95, 3492.85), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((2578.57, 3862.05, 3627.6), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((2492.88, 4170.69, 3912.3), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((2470.95, 4502.24, 4245.01), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((2637.51, 4104.06, 4213.03), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((2752.86, 3482.25, 4181.85), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((2756.41, 2852.96, 3898.17), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((2896.77, 2363.26, 3896.42), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((3213.5, 2034.93, 4221.68), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((3296.69, 1817.94, 4655.18), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((2976.22, 1445.14, 4741.32), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((2705.39, 1465.91, 5065.15), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((3054.4, 1404.24, 5234.45), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((3123.38, 1268.2, 4878.57), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((3204, 1496.25, 4578.31), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((3320.65, 1985.83, 4607.16), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((3783.59, 2280.49, 4885.06), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((3811.66, 2810.17, 4773.38), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((3525.69, 3195.89, 4326.59), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((3081, 3482.89, 4334.29), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((2853.6, 3803.59, 4334.5), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((2964.32, 3893.41, 4013.8), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((3187.5, 3502.13, 3941.02), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((3284.26, 2978.72, 4108.01), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((3422.97, 2438.06, 4504.88), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((3457.47, 2068.94, 4871.79), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((3393.42, 1729.8, 4891.21), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((2919.01, 1575.76, 4537.17), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((2487.87, 1151, 4096.42), (0.7, 0.7, 0.7), 154.945)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 | 5,074,963,802,422,595,000 | 47.804757 | 76 | 0.70266 | false |
Zluurk/pypeman | pypeman/conf.py | 1 | 2483 | #!/usr/bin/env python
# # Copyright : (C) 2014 by MHComm. All rights reserved
#
# Name : conf.py
"""
Summary : TBD...
"""
from __future__ import absolute_import
__author__ = "jeremie"
__copyright__ = "(C) 2016 by MHComm. All rights reserved"
__email__ = "[email protected]"
import sys
import importlib
import traceback
import os
import pypeman.default_settings as default_settings
import logging
import logging.config
NOT_FOUND = object() # sentinel object
class ConfigError(ImportError):
""" custom exception """
class Settings():
""" pypeman projects settings. Rather similar implementations to django.conf.settings """
def __init__(self, module_name=None):
self.__dict__['_settings_mod'] = None
if module_name:
self.__dict__['SETTINGS_MODULE'] = module_name
else:
self.__dict__['SETTINGS_MODULE'] = os.environ.get('PYPEMAN_SETTINGS_MODULE', 'settings')
def init_settings(self):
try:
settings_module = self.__dict__['SETTINGS_MODULE']
settings_mod = self.__dict__['_settings_mod'] = importlib.import_module(settings_module)
except Exception:
msg = "Can't import '%s' module !" % self.__dict__['SETTINGS_MODULE']
print(msg, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
raise ConfigError(msg)
# populate entire dict with values. helpful e.g. for ipython tab completion
default_vals = [(key, val) for (key, val) in default_settings.__dict__.items()
if 'A' <= key[0] <= 'Z']
self.__dict__.update(default_vals)
mod_vals = [(key, val) for (key, val) in settings_mod.__dict__.items()
if 'A' <= key[0] <= 'Z']
self.__dict__.update(mod_vals)
logging.config.dictConfig(self.__dict__['LOGGING'])
def __getattr__(self, name):
""" lazy getattr. first access imports and populates settings """
if name in self.__dict__:
return self.__dict__[name]
if not self.__dict__['_settings_mod']:
self.init_settings()
return self.__dict__[name]
def __setattr__(self, name, value):
""" make sure nobody tries to modify settings manually """
if name in self.__dict__:
self.__dict__[name] = value
else:
print(name, value)
raise Exception("Settings are not editable !")
settings = Settings()
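# Illustrative usage sketch (not part of the original module); assumes a
# 'settings' module is importable, or that PYPEMAN_SETTINGS_MODULE names one:
#
#     from pypeman.conf import settings
#     settings.MY_OPTION   # first access of a not-yet-known attribute triggers
#                          # init_settings(), which merges the project settings
#                          # module over default_settings (MY_OPTION is a
#                          # hypothetical setting name)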
| apache-2.0 | 2,246,862,227,573,352,400 | 30.0375 | 100 | 0.583971 | false |
gamernetwork/gn-django | examples/view_registry/project/settings/base.py | 1 | 3717 | """
Django settings for view_registry project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b@n605*@a59yzz7*mv^zjy(f5q_zkwd^0sl4)0k8k+!wxnpu!t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_jinja',
'core',
'eurogamer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
"BACKEND": "gn_django.template.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'gn_django.template.context_processors.settings',
],
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'gn_django.template.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit | -8,052,822,851,751,771,000 | 26.131387 | 91 | 0.664245 | false |
ThomasYeoLab/CBIG | stable_projects/fMRI_dynamics/Kong2021_pMFM/part1_pMFM_main/scripts/CBIG_pMFM_step7_perturbation_analysis.py | 1 | 23030 | # /usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import torch
import time
import math
import scipy.io as sio
import CBIG_pMFM_basic_functions_main as fc
import warnings
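# torch_max/torch_min below compute the element-wise maximum/minimum of two
# equally shaped tensors by stacking them along a new trailing dimension and
# reducing over it; for same-shape inputs the effect is equivalent to
# torch.max(A, B) / torch.min(A, B).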
def torch_max(A, B):
if A.shape != B.shape:
raise ValueError('Dimension mismatch.')
Am = torch.unsqueeze(A, dim=len(A.shape))
Bm = torch.unsqueeze(B, dim=len(B.shape))
C = torch.cat((Am, Bm), dim=len(A.shape))
o = torch.max(C, dim=len(A.shape))
return o[0]
def torch_min(A, B):
if A.shape != B.shape:
raise ValueError('Dimension mismatch.')
Am = torch.unsqueeze(A, dim=len(A.shape))
Bm = torch.unsqueeze(B, dim=len(B.shape))
C = torch.cat((Am, Bm), dim=len(A.shape))
o = torch.min(C, dim=len(A.shape))
return o[0]
def CBIG_mfm_original_simulation(parameter,
sc_mat,
t_epochlong,
noise,
d_t=0.01):
    '''
    Function used to generate the simulated BOLD signal using the mean field
    model and the hemodynamic model.
    Each parameter set is used to run one simulation.
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROIs
                    M is the number of candidate parameter sets
                    Each column of the matrix represents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        sc_mat:     N*N structural connectivity matrix
        t_epochlong: total simulated time in minutes
        noise:      pre-generated standard normal noise for the Wiener process
    Returns:
        bold_d:     simulated BOLD signal
        s_max:      per-node maximum of the neural state variable over the run
        s_min:      per-node minimum of the neural state variable over the run
    '''
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Initializing system parameters
kstart = 0.
t_pre = 60 * 2
kend = t_pre + 60 * t_epochlong
t_bold = 0.72
# sampling ratio
k_p = torch.arange(kstart, kend + d_t, d_t)
n_nodes = sc_mat.shape[0]
n_samples = k_p.shape[0]
n_set = parameter.shape[1]
# Initializing neural activity
y_t = torch.zeros((n_nodes, n_set))
d_y = torch.zeros((n_nodes, n_set))
# Initializing hemodynamic activity
f_mat = torch.ones((n_nodes, n_set, 4))
z_t = torch.zeros((n_nodes, n_set))
f_t = torch.ones((n_nodes, n_set))
v_t = torch.ones((n_nodes, n_set))
q_t = torch.ones((n_nodes, n_set))
f_mat[:, :, 0] = z_t
y_t[:, :] = 0.001
# Wiener process
w_coef = parameter[2 * n_nodes + 1:3 * n_nodes + 1, :] / math.sqrt(0.001)
if w_coef.shape[0] == 1:
w_coef = w_coef.repeat(n_nodes, 1)
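    # Constants of the hemodynamic (Balloon-Windkessel) model; y_bold_temp
    # below converts the hemodynamic state (q_t, v_t) into the BOLD signal.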
p_costant = 0.34
v_0 = 0.02
k_1 = 4.3 * 28.265 * 3 * 0.0331 * p_costant
k_2 = 0.47 * 110 * 0.0331 * p_costant
k_3 = 0.53
count = 0
y_bold = torch.zeros((n_nodes, n_set, int(n_samples / (t_bold / d_t) + 1)))
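    # s_max/s_min track the per-node extrema of the neural state variable over
    # the whole simulation; they are returned and later saved (Svalue files)
    # as the target states for the perturbation experiment.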
s_max = torch.zeros((n_nodes, n_set))
s_min = torch.ones((n_nodes, n_set))
cut_index = int(t_pre / t_bold)
# Warm up
start = time.time()
for i in range(1000):
d_y = fc.CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
y_t = y_t + d_y * d_t + w_coef * noise[:, :, i] * math.sqrt(d_t)
# Main body: calculation
for i in range(n_samples):
d_y = fc.CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
random_num = noise[:, :, i + 1000]
y_t = y_t + d_y * d_t + w_coef * random_num * math.sqrt(d_t)
s_max = torch_max(y_t, s_max)
s_min = torch_min(y_t, s_min)
d_f = fc.CBIG_mfm_rfMRI_BW_ode(y_t, f_mat)
f_mat = f_mat + d_f * d_t
z_t, f_t, v_t, q_t = torch.chunk(f_mat, 4, dim=2)
y_bold_temp = 100 / p_costant * v_0 * (
k_1 * (1 - q_t) + k_2 * (1 - q_t / v_t) + k_3 * (1 - v_t))
y_bold[:, :, count] = y_bold_temp[:, :, 0]
count = count + ((i + 1) % (t_bold / d_t) == 0) * 1
elapsed = time.time() - start
print('The time used for calculating simulated BOLD signal is: ', elapsed)
# Downsampling
bold_d = y_bold[:, :, cut_index + 1:y_bold.shape[2]]
return bold_d, s_max, s_min
def CBIG_mfm_perturbation_simulation(parameter,
sc_mat,
t_epochlong,
noise,
node_mask,
index,
svalue,
d_t=0.01):
    '''
    Function used to generate the simulated BOLD signal using the mean field
    model and the hemodynamic model, with a perturbation applied to the
    regions selected by node_mask.
    Each parameter set is used to run one simulation.
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROIs
                    M is the number of candidate parameter sets
                    Each column of the matrix represents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        sc_mat:     N*N structural connectivity matrix
        t_epochlong: total simulated time in minutes
        noise:      pre-generated standard normal noise for the Wiener process
        node_mask:  binary mask selecting the regions to perturb
        index:      index into the saved range_list giving the perturbation
                    onset window for this simulation
        svalue:     N*2 matrix of per-node maximum (column 0) and minimum
                    (column 1) state values from the unperturbed simulation
    Returns:
        bold_d:     simulated BOLD signal
    '''
torch.set_default_tensor_type('torch.cuda.FloatTensor')
range_list = np.load('../output/step7_perturbation_simulation' +
'/range_list.npy')
range_index = range_list[index]
start_point = range_index[0] + 60
# Initializing system parameters
kstart = 0.
t_pre = 60 * 2
kend = t_pre + 60 * t_epochlong
t_bold = 0.72
# sampling ratio
k_p = torch.arange(kstart, kend + d_t, d_t)
n_nodes = sc_mat.shape[0]
n_samples = k_p.shape[0]
n_set = parameter.shape[1]
# Initializing neural activity
y_t = torch.zeros((n_nodes, n_set))
d_y = torch.zeros((n_nodes, n_set))
# Initializing hemodynamic activity
f_mat = torch.ones((n_nodes, n_set, 4))
z_t = torch.zeros((n_nodes, n_set))
f_t = torch.ones((n_nodes, n_set))
v_t = torch.ones((n_nodes, n_set))
q_t = torch.ones((n_nodes, n_set))
f_mat[:, :, 0] = z_t
y_t[:, :] = 0.001
# Wiener process
w_coef = parameter[2 * n_nodes + 1:3 * n_nodes + 1, :] / math.sqrt(0.001)
if w_coef.shape[0] == 1:
w_coef = w_coef.repeat(n_nodes, 1)
p_costant = 0.34
v_0 = 0.02
k_1 = 4.3 * 28.265 * 3 * 0.0331 * p_costant
k_2 = 0.47 * 110 * 0.0331 * p_costant
k_3 = 0.53
count = 0
y_bold = torch.zeros((n_nodes, n_set, int(n_samples / (t_bold / d_t) + 1)))
cut_index = int(t_pre / t_bold)
# Warm up
start = time.time()
for i in range(1000):
d_y = fc.CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
y_t = y_t + d_y * d_t + w_coef * noise[:, :, i] * math.sqrt(d_t)
smax = torch.max(svalue[:, 0]) * node_mask
smin = torch.min(svalue[:, 1]) * node_mask
sign = 0
# Main body: calculation
for i in range(n_samples):
d_y = fc.CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
random_num = noise[:, :, i + 1000]
y_t = y_t + d_y * d_t + w_coef * random_num * math.sqrt(d_t)
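        # Perturbation step: during the single BOLD frame where
        # count == start_point + cut_index, the masked regions are pushed
        # towards the opposite of their current stable state (down if they
        # are closer to smax, up towards smax if they are closer to smin),
        # with an increment of 0.8 * y_func(...) added at every integration
        # step of that frame.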
if count >= start_point + cut_index and count < start_point + \
cut_index + 1:
y_t_masked = y_t * node_mask
if torch.sum(y_t_masked) != torch.sum(y_t_masked):
break
if sign == 0 and torch.sum(abs(y_t_masked - smax)) <= torch.sum(
abs(y_t_masked - smin)):
sign = -1
def y_func(a):
return a
elif sign == 0 and torch.sum(abs(y_t_masked - smax)) > torch.sum(
abs(y_t_masked - smin)):
sign = 1
def y_func(a):
return smax - a
y_t = y_t + 0.8 * y_func(y_t_masked) * sign
d_f = fc.CBIG_mfm_rfMRI_BW_ode(y_t, f_mat)
f_mat = f_mat + d_f * d_t
z_t, f_t, v_t, q_t = torch.chunk(f_mat, 4, dim=2)
y_bold_temp = 100 / p_costant * v_0 * (
k_1 * (1 - q_t) + k_2 * (1 - q_t / v_t) + k_3 * (1 - v_t))
y_bold[:, :, count] = y_bold_temp[:, :, 0]
count = count + ((i + 1) % (t_bold / d_t) == 0) * 1
elapsed = time.time() - start
print('The time used for calculating simulated BOLD signal is: ', elapsed)
# Downsampling
bold_d = y_bold[:, :, cut_index + 1:y_bold.shape[2]]
return bold_d
def CBIG_pMFM_generate_simulated_original_data(gpu_index=0):
torch.cuda.set_device(gpu_index)
test_file = '../output/step3_test_results/test_all.csv'
output_path = '../output/step7_perturbation_simulation/original'
if not os.path.isdir(output_path):
os.makedirs(output_path)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
n_set = 200
result_all = fc.csv_matrix_read(test_file)
parameter = result_all[11:, 0]
parameter = np.tile(parameter, [n_set, 1]).T
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
# Load data
sc_mat_raw = fc.csv_matrix_read('../input/sc_test.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
count = 1
for ti in range(10):
print('Starting ' + str(ti))
torch.cuda.manual_seed(ti)
noise = torch.randn(68, n_set, 99402)
# Calculating simulated BOLD signal using MFM
bold_d, s_max, s_min = CBIG_mfm_original_simulation(
parameter, sc_mat, 14.4, noise)
# Initializing the FC and FCD masks
n_set = bold_d.shape[1]
n_nodes = bold_d.shape[0]
window_size = 83
time_length = 1200 - window_size + 1
sub_num = 10
fc_edgenum = int(n_nodes * (n_nodes - 1) / 2)
fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1
fc_maskm = torch.zeros(n_nodes * sub_num,
n_nodes * sub_num).type(torch.cuda.ByteTensor)
for i in range(sub_num):
fc_maskm[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *
(i + 1)] = fc_mask
# Calculating simulated FCD matrices
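        # FC is computed in a sliding window of `window_size` TRs, vectorised
        # over the upper triangle of the FC matrix, and the FCD matrix is the
        # correlation between every pair of windowed FC vectors.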
fcd_all = torch.ones(time_length, time_length, n_set).cpu()
fc_mat = torch.zeros(fc_edgenum, sub_num, time_length)
batch_num = int(n_set / sub_num)
for b in range(batch_num):
bold_temp = bold_d[:, b * sub_num:(b + 1) * sub_num, :]
bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)
for i in range(0, time_length):
bold_fc = fc.torch_corr(bold_tempm[:, i:i + window_size])
cor_temp = bold_fc[fc_maskm]
fc_mat[:, :, i] = torch.transpose(
cor_temp.view(sub_num, fc_edgenum), 0, 1)
for j in range(0, sub_num):
fcd_all[:, :, j + b * sub_num] = fc.torch_corr(
torch.transpose(fc_mat[:, j, :], 0, 1))
bold_numpy = bold_d.cpu().numpy()
fcd_numpy = fcd_all.numpy()
noise_numpy = noise.cpu().numpy()
smax_numpy = s_max.cpu().numpy()
smin_numpy = s_min.cpu().numpy()
# Save out simulated data
fcd_dir = os.path.join(output_path, 'FCD')
if not os.path.isdir(fcd_dir):
os.makedirs(fcd_dir)
tc_dir = os.path.join(output_path, 'TC')
if not os.path.isdir(tc_dir):
os.makedirs(tc_dir)
noise_dir = os.path.join(output_path, 'Noise')
if not os.path.isdir(noise_dir):
os.makedirs(noise_dir)
svalue_dir = os.path.join(output_path, 'Svalue')
if not os.path.isdir(svalue_dir):
os.makedirs(svalue_dir)
for i in range(n_set):
print('Generating simulated TC and FCD number: ' + str(count))
fcd_save = fcd_numpy[:, :, i]
bold_save = bold_numpy[:, i, :]
noise_save = noise_numpy[:, i, :]
svalue_save = np.zeros((n_nodes, 2))
svalue_save[:, 0] = smax_numpy[:, i]
svalue_save[:, 1] = smin_numpy[:, i]
if (fcd_save == fcd_save).all():
np.save(
os.path.join(fcd_dir, 'FCD_' + str(count) + '.npy'),
fcd_save)
np.save(
os.path.join(tc_dir, 'TC_' + str(count) + '.npy'),
bold_save)
np.save(
os.path.join(noise_dir, 'Noise_' + str(count) + '.npy'),
noise_save)
np.save(
os.path.join(svalue_dir, 'Svalue_' + str(count) + '.npy'),
svalue_save)
count += 1
if count > 1000:
break
if count > 1000:
break
torch.cuda.empty_cache()
def CBIG_pMFM_determine_time_range():
index_list = []
range_list = []
for index in range(1, 1001):
FCD_mat = np.load(
'../output/step7_perturbation_simulation/original/FCD/FCD_' +
str(index) + '.npy')
FCD_mean = np.mean(FCD_mat, 1)
fcd_low = 1 * (FCD_mean < 0.6)
len_count = 0
max_count = 0
range_index = np.array([0, 0])
temp_start = 0
for i in range(1, fcd_low.shape[0]):
if fcd_low[i] == 1:
len_count += 1
if fcd_low[i - 1] == 0:
temp_start = i
elif fcd_low[i] == 0 and fcd_low[i - 1] == 1:
if max_count < len_count:
max_count = len_count
range_index[1] = i
range_index[0] = temp_start
len_count = 0
if max_count < len_count:
max_count = len_count
range_index[1] = i
range_index[0] = temp_start
        # Only when the stable state lasts for more than 200 time steps
        # can the FCD be used in the perturbation experiment
if max_count >= 200:
index_list.append(index)
range_list.append(range_index)
    # index_list contains the indexes of the FCD matrices to which a
    # perturbation can be added
    # range_list contains the corresponding perturbation injection time windows
np.save('../output/step7_perturbation_simulation/index_list.npy',
index_list)
np.save('../output/step7_perturbation_simulation/range_list.npy',
range_list)
def CBIG_pMFM_generate_perturbed_FCD(gpu_index=0,
region_num=5,
region_indi='top'):
test_file = '../output/step3_test_results/test_all.csv'
    output_path = '../output/step7_perturbation_simulation/' + \
        region_indi + str(region_num) + '_regions'
if not os.path.isdir(output_path):
os.makedirs(output_path)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.cuda.set_device(gpu_index)
n_set = 1
result_all = fc.csv_matrix_read(test_file)
parameter = result_all[11:, 0]
parameter = np.tile(parameter, [n_set, 1]).T
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
# Load data
emp_fcd = sio.loadmat('../input/fcd_test.mat')
emp_fcd = np.array(emp_fcd['test_aveM'])
sc_mat_raw = fc.csv_matrix_read('../input/sc_test.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
emp_fc = fc.csv_matrix_read('../input/fc_test.csv')
emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()
sim_grad_corr = sio.loadmat(
'../output/step5_STDFCD_results/STD_FCD_simulated.mat')
sim_grad_corr = np.array(sim_grad_corr['SWSTD_FCD_sim'])
sim_grad_corrM = np.tile(sim_grad_corr, [1, n_set])
node_maskM = torch.from_numpy(sim_grad_corrM).cuda()
sim_grad_corr_sort = torch.from_numpy(np.sort(sim_grad_corr)).cuda()
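    # Build the perturbation mask from the regions with the highest ('top') or
    # lowest ('bottom') simulated SWSTD-FCD correlation. Note that the
    # thresholds sim_grad_corr_sort[-6] and sim_grad_corr_sort[5] hard-code a
    # selection of 5 regions, i.e. they assume region_num == 5 (the default).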
if region_indi == 'top':
node_mask = 1 * (node_maskM > sim_grad_corr_sort[-6]).type(
torch.FloatTensor).cuda()
else:
node_mask = 1 * (node_maskM < sim_grad_corr_sort[5]).type(
torch.FloatTensor).cuda()
index_list = np.load(
'../output/step7_perturbation_simulation/index_list.npy')
fcd_dir = os.path.join(output_path, 'FCD')
if not os.path.isdir(fcd_dir):
os.makedirs(fcd_dir)
tc_dir = os.path.join(output_path, 'TC')
if not os.path.isdir(tc_dir):
os.makedirs(tc_dir)
for i in range(0, len(index_list)):
index = index_list[i]
print('Analyzing index ' + str(index))
if os.path.isfile(os.path.join(fcd_dir, 'FCD_' + str(index) + '.npy')):
continue
noise_numpy = np.load(
'../output/step7_perturbation_simulation/original/Noise'
'/Noise_' + str(index) + '.npy')
noise = torch.from_numpy(noise_numpy).cuda()
noise = torch.unsqueeze(noise, dim=1)
svalue_numpy = np.load(
'../output/step7_perturbation_simulation/original/Svalue'
'/Svalue_' + str(index) + '.npy')
svalue = torch.from_numpy(svalue_numpy).type(torch.FloatTensor).cuda()
# Calculating simulated BOLD signal using MFM
bold_d = CBIG_mfm_perturbation_simulation(parameter, sc_mat, 14.4,
noise, node_mask, i, svalue)
# Initializing the FC and FCD masks
n_set = bold_d.shape[1]
n_nodes = bold_d.shape[0]
window_size = 83
time_length = 1200 - window_size + 1
sub_num = 1
fc_edgenum = int(n_nodes * (n_nodes - 1) / 2)
fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1
fc_maskm = torch.zeros(n_nodes * sub_num,
n_nodes * sub_num).type(torch.cuda.ByteTensor)
for i in range(sub_num):
fc_maskm[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *
(i + 1)] = fc_mask
        # Calculating simulated FCD matrices
fcd_all = torch.ones(time_length, time_length, n_set).cpu()
fc_mat = torch.zeros(fc_edgenum, sub_num, time_length)
batch_num = int(n_set / sub_num)
for b in range(batch_num):
bold_temp = bold_d[:, b * sub_num:(b + 1) * sub_num, :]
bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)
for i in range(0, time_length):
bold_fc = fc.torch_corr(bold_tempm[:, i:i + window_size])
cor_temp = bold_fc[fc_maskm]
fc_mat[:, :, i] = torch.transpose(
cor_temp.view(sub_num, fc_edgenum), 0, 1)
for j in range(0, sub_num):
fcd_all[:, :, j + b * sub_num] = fc.torch_corr(
torch.transpose(fc_mat[:, j, :], 0, 1))
bold_numpy = bold_d.cpu().numpy()
fcd_numpy = fcd_all.numpy()
fcd_save = fcd_numpy[:, :, 0]
bold_save = bold_numpy[:, 0, :]
np.save(os.path.join(fcd_dir, 'FCD_' + str(index) + '.npy'), fcd_save)
np.save(os.path.join(tc_dir, 'TC_' + str(index) + '.npy'), bold_save)
def CBIG_pMFM_analysis_perturbed_FCD(region_num=5):
index_list = np.load(
'../output/step7_perturbation_simulation/index_list.npy')
range_list = np.load(
'../output/step7_perturbation_simulation/range_list.npy')
origin_edges_all = np.array([])
top_edges_all = np.array([])
bottom_edges_all = np.array([])
window_len = 200
for i in range(0, index_list.shape[0]):
index = index_list[i]
fcd_origin = np.load(
'../output/step7_perturbation_simulation/original/FCD/FCD_' +
str(index) + '.npy')
fcd_top = np.load('../output/step7_perturbation_simulation/top' +
str(region_num) + '_regions/FCD/FCD_' + str(index) +
'.npy')
bold_top = np.load('../output/step7_perturbation_simulation/top' +
str(region_num) + '_regions/TC/TC_' + str(index) +
'.npy')
fcd_bottom = np.load('../output/step7_perturbation_simulation/bottom' +
str(region_num) + '_regions/FCD/FCD_' +
str(index) + '.npy')
bold_bottom = np.load(
'../output/step7_perturbation_simulation/bottom' +
str(region_num) + '_regions/TC/TC_' + str(index) + '.npy')
if np.sum(bold_top[-1, :]) == 0 or np.isnan(np.sum(bold_top[-1, :])):
continue
if np.sum(bold_bottom[-1, :]) == 0 or np.isnan(
np.sum(bold_bottom[-1, :])):
continue
range_index = range_list[i]
perturb_start = range_index[0] + 18
perturb_end = min(perturb_start + window_len, 1118)
mat_origin = fcd_origin[perturb_start:perturb_end, perturb_start:
perturb_end]
mat_top = fcd_top[perturb_start:perturb_end, perturb_start:perturb_end]
mat_bottom = fcd_bottom[perturb_start:perturb_end, perturb_start:
perturb_end]
origin_edges = np.mean(mat_origin, 1)
top_edges = np.mean(mat_top, 1)
bottom_edges = np.mean(mat_bottom, 1)
origin_edges_all = np.concatenate((origin_edges_all,
np.array([np.mean(origin_edges)])))
top_edges_all = np.concatenate((top_edges_all,
np.array([np.mean(top_edges)])))
bottom_edges_all = np.concatenate((bottom_edges_all,
np.array([np.mean(bottom_edges)])))
output_dir = '../output/step7_perturbation_simulation/stats'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
origin_data = {}
origin_data['origin_edge'] = origin_edges_all
top_data = {}
top_data['top_edge'] = top_edges_all
bottom_data = {}
bottom_data['bottom_edge'] = bottom_edges_all
sio.savemat(os.path.join(output_dir, 'origin_data.mat'), origin_data)
sio.savemat(os.path.join(output_dir, 'top_data.mat'), top_data)
sio.savemat(os.path.join(output_dir, 'bottom_data.mat'), bottom_data)
if __name__ == '__main__':
warnings.filterwarnings("ignore", category=RuntimeWarning)
    print('Start generating original simulated data.')
CBIG_pMFM_generate_simulated_original_data()
print('Start determining perturbation starting time.')
CBIG_pMFM_determine_time_range()
print('Start generating perturbed simulated data')
CBIG_pMFM_generate_perturbed_FCD(region_indi='top')
CBIG_pMFM_generate_perturbed_FCD(region_indi='bottom')
print('Start computing the final results')
CBIG_pMFM_analysis_perturbed_FCD()
| mit | -2,650,524,098,235,753,000 | 36.145161 | 79 | 0.533608 | false |
ptisserand/ansible | lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py | 13 | 9648 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <[email protected]>
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_loadbalancer_rule_member
short_description: Manages load balancer rule members on Apache CloudStack based clouds.
description:
- Add and remove load balancer rule members.
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "René Moser (@resmo)"
options:
name:
description:
- The name of the load balancer rule.
required: true
ip_address:
description:
      - Public IP address from where the network traffic will be load balanced.
- Only needed to find the rule if C(name) is not unique.
aliases: [ 'public_ip' ]
vms:
description:
- List of VMs to assign to or remove from the rule.
required: true
aliases: [ 'vm' ]
state:
description:
- Should the VMs be present or absent from the rule.
default: 'present'
choices: [ 'present', 'absent' ]
project:
description:
- Name of the project the firewall rule is related to.
domain:
description:
- Domain the rule is related to.
account:
description:
- Account the rule is related to.
zone:
description:
- Name of the zone in which the rule should be located.
- If not set, default zone is used.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Add VMs to an existing load balancer
- local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vms:
- web01
- web02
# Remove a VM from an existing load balancer
- local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vms:
- web01
- web02
state: absent
# Rolling upgrade of hosts
- hosts: webservers
serial: 1
pre_tasks:
- name: Remove from load balancer
local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vm: "{{ ansible_hostname }}"
state: absent
tasks:
# Perform update
post_tasks:
- name: Add to load balancer
local_action:
module: cs_loadbalancer_rule_member
name: balance_http
vm: "{{ ansible_hostname }}"
state: present
'''
RETURN = '''
---
id:
description: UUID of the rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
zone:
description: Name of zone the rule is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the rule is related to.
returned: success
type: string
sample: Production
account:
description: Account the rule is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the rule is related to.
returned: success
type: string
sample: example domain
algorithm:
description: Load balancer algorithm used.
returned: success
type: string
sample: "source"
cidr:
description: CIDR to forward traffic from.
returned: success
type: string
sample: ""
name:
description: Name of the rule.
returned: success
type: string
sample: "http-lb"
description:
description: Description of the rule.
returned: success
type: string
sample: "http load balancer rule"
protocol:
description: Protocol of the rule.
returned: success
type: string
sample: "tcp"
public_port:
description: Public port.
returned: success
type: string
sample: 80
private_port:
description: Private IP address.
returned: success
type: string
sample: 80
public_ip:
description: Public IP address.
returned: success
type: string
sample: "1.2.3.4"
vms:
description: Rule members.
returned: success
type: list
sample: '[ "web01", "web02" ]'
tags:
description: List of resource tags associated with the rule.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
state:
description: State of the rule.
returned: success
type: string
sample: "Add"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackLBRuleMember(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackLBRuleMember, self).__init__(module)
self.returns = {
'publicip': 'public_ip',
'algorithm': 'algorithm',
'cidrlist': 'cidr',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'privateport': 'private_port',
}
def get_rule(self):
args = self._get_common_args()
args.update({
'name': self.module.params.get('name'),
'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None,
})
if self.module.params.get('ip_address'):
args['publicipid'] = self.get_ip_address(key='id')
rules = self.query_api('listLoadBalancerRules', **args)
if rules:
if len(rules['loadbalancerrule']) > 1:
self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name'])
return rules['loadbalancerrule'][0]
return None
def _get_common_args(self):
return {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
def _get_members_of_rule(self, rule):
res = self.query_api('listLoadBalancerRuleInstances', id=rule['id'])
return res.get('loadbalancerruleinstance', [])
def _ensure_members(self, operation):
if operation not in ['add', 'remove']:
self.module.fail_json(msg="Bad operation: %s" % operation)
rule = self.get_rule()
if not rule:
self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name'))
existing = {}
for vm in self._get_members_of_rule(rule=rule):
existing[vm['name']] = vm['id']
wanted_names = self.module.params.get('vms')
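        # Only the difference with the current members is applied: 'add'
        # assigns VMs that are not yet attached, 'remove' detaches VMs that
        # are currently attached, which keeps the operation idempotent.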
if operation == 'add':
cs_func = self.cs.assignToLoadBalancerRule
to_change = set(wanted_names) - set(existing.keys())
else:
cs_func = self.cs.removeFromLoadBalancerRule
to_change = set(wanted_names) & set(existing.keys())
if not to_change:
return rule
args = self._get_common_args()
vms = self.query_api('listVirtualMachines', **args)
to_change_ids = []
for name in to_change:
for vm in vms.get('virtualmachine', []):
if vm['name'] == name:
to_change_ids.append(vm['id'])
break
else:
self.module.fail_json(msg="Unknown VM: %s" % name)
if to_change_ids:
self.result['changed'] = True
if to_change_ids and not self.module.check_mode:
res = cs_func(
id=rule['id'],
virtualmachineids=to_change_ids,
)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res)
rule = self.get_rule()
return rule
def add_members(self):
return self._ensure_members('add')
def remove_members(self):
return self._ensure_members('remove')
def get_result(self, rule):
super(AnsibleCloudStackLBRuleMember, self).get_result(rule)
if rule:
self.result['vms'] = []
for vm in self._get_members_of_rule(rule=rule):
self.result['vms'].append(vm['name'])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
ip_address=dict(aliases=['public_ip']),
vms=dict(required=True, aliases=['vm'], type='list'),
state=dict(choices=['present', 'absent'], default='present'),
zone=dict(),
domain=dict(),
project=dict(),
account=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module)
state = module.params.get('state')
if state in ['absent']:
rule = acs_lb_rule_member.remove_members()
else:
rule = acs_lb_rule_member.add_members()
result = acs_lb_rule_member.get_result(rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,431,027,037,259,169,000 | 26.95942 | 128 | 0.619013 | false |
pavelchristof/gomoku-ai | tensorflow/contrib/keras/python/keras/engine/topology_test.py | 2 | 26468 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TopologyConstructionTest(test.TestCase):
def test_get_updates_for(self):
a = keras.layers.Input(shape=(2,))
dense_layer = keras.layers.Dense(1)
dense_layer.add_update(0, inputs=a)
dense_layer.add_update(1, inputs=None)
self.assertListEqual(dense_layer.get_updates_for(a), [0])
self.assertListEqual(dense_layer.get_updates_for(None), [1])
def test_get_losses_for(self):
a = keras.layers.Input(shape=(2,))
dense_layer = keras.layers.Dense(1)
dense_layer.add_loss(0, inputs=a)
dense_layer.add_loss(1, inputs=None)
self.assertListEqual(dense_layer.get_losses_for(a), [0])
self.assertListEqual(dense_layer.get_losses_for(None), [1])
def test_trainable_weights(self):
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dense(1)(a)
model = keras.models.Model(a, b)
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[1].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
# sequential model
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[0].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
def test_weight_loading(self):
with self.test_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
if h5py is None:
return # Skip rest of test if H5py isn't available.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
model.save_weights(h5_path)
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.load_weights(h5_path, by_name=True)
y = model.predict(x)
self.assertAllClose(ref_y, y)
def test_learning_phase(self):
with self.test_session():
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
a_2 = keras.layers.Dense(16, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
self.assertFalse(a_2._uses_learning_phase)
self.assertTrue(b_2._uses_learning_phase)
# test merge
m = keras.layers.concatenate([a_2, b_2])
self.assertTrue(m._uses_learning_phase)
# Test recursion
model = keras.models.Model([a, b], [a_2, b_2])
self.assertTrue(model.uses_learning_phase)
c = keras.layers.Input(shape=(32,), name='input_c')
d = keras.layers.Input(shape=(32,), name='input_d')
c_2, b_2 = model([c, d])
self.assertTrue(c_2._uses_learning_phase)
self.assertTrue(b_2._uses_learning_phase)
# try actually running graph
fn = keras.backend.function(
model.inputs + [keras.backend.learning_phase()], model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
fn_outputs_dp = fn([input_a_np, input_b_np, 1])
# output a: nothing changes
self.assertEqual(fn_outputs_no_dp[0].sum(), fn_outputs_dp[0].sum())
# output b: dropout applied
self.assertNotEqual(fn_outputs_no_dp[1].sum(), fn_outputs_dp[1].sum())
def test_layer_call_arguments(self):
# Test the ability to pass and serialize arguments to `call`.
inp = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(inp)
x = keras.layers.Dropout(0.5)(x, training=True)
model = keras.models.Model(inp, x)
self.assertFalse(model.uses_learning_phase)
# Test that argument is kept when applying the model
inp2 = keras.layers.Input(shape=(2,))
out2 = model(inp2)
self.assertFalse(out2._uses_learning_phase)
# Test that argument is kept after loading a model
config = model.get_config()
model = keras.models.Model.from_config(config)
self.assertFalse(model.uses_learning_phase)
def test_node_construction(self):
# test basics
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), unknwon_kwarg=None)
self.assertListEqual(a.get_shape().as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer.inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer.inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertListEqual(node.inbound_layers, [])
self.assertListEqual(node.input_tensors, [a])
self.assertListEqual(node.input_shapes, [(None, 32)])
self.assertListEqual(node.output_tensors, [a])
self.assertListEqual(node.output_shapes, [(None, 32)])
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(len(dense.inbound_nodes), 2)
self.assertEqual(len(dense.outbound_nodes), 0)
self.assertListEqual(dense.inbound_nodes[0].inbound_layers, [a_layer])
self.assertEqual(dense.inbound_nodes[0].outbound_layer, dense)
self.assertListEqual(dense.inbound_nodes[1].inbound_layers, [b_layer])
self.assertEqual(dense.inbound_nodes[1].outbound_layer, dense)
self.assertListEqual(dense.inbound_nodes[0].input_tensors, [a])
self.assertListEqual(dense.inbound_nodes[1].input_tensors, [b])
# test layer properties
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertListEqual(test_layer.kernel.get_shape().as_list(), [32, 16])
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
self.assertEqual(dense.get_input_mask_at(0), None)
self.assertEqual(dense.get_input_mask_at(1), None)
self.assertEqual(dense.get_output_mask_at(0), None)
self.assertEqual(dense.get_output_mask_at(1), None)
def test_multi_input_layer(self):
with self.test_session():
# test multi-input layer
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
self.assertListEqual(merged.get_shape().as_list(), [None, 16 * 2])
merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
self.assertEqual(merge_node_index, 0)
self.assertEqual(merge_tensor_index, 0)
self.assertEqual(len(merge_layer.inbound_nodes), 1)
self.assertEqual(len(merge_layer.outbound_nodes), 0)
self.assertEqual(len(merge_layer.inbound_nodes[0].input_tensors), 2)
self.assertEqual(len(merge_layer.inbound_nodes[0].inbound_layers), 2)
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
self.assertEqual(len(model.layers), 6)
output_shapes = model._compute_output_shape([(None, 32), (None, 32)])
self.assertListEqual(output_shapes[0].as_list(), [None, 64])
self.assertListEqual(output_shapes[1].as_list(), [None, 5])
self.assertListEqual(
model.compute_mask([a, b], [None, None]), [None, None])
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([l.name for l in model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in model._output_layers],
['dense_2', 'dense_3'])
# actually run model
fn = keras.backend.function(model.inputs, model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
# test get_source_inputs
self.assertListEqual(keras.engine.topology.get_source_inputs(c), [a, b])
# serialization / deserialization
json_config = model.to_json()
recreated_model = keras.models.model_from_json(json_config)
recreated_model.compile('rmsprop', 'mse')
self.assertListEqual([l.name for l in recreated_model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in recreated_model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in recreated_model._output_layers],
['dense_2', 'dense_3'])
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
def test_recursion(self):
with self.test_session():
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
e = keras.layers.Input(shape=(32,), name='input_e')
f = keras.layers.Input(shape=(32,), name='input_f')
g, h = model([e, f])
self.assertListEqual(g.get_shape().as_list(), c.get_shape().as_list())
self.assertListEqual(h.get_shape().as_list(), d.get_shape().as_list())
# test separate manipulation of different layer outputs
i = keras.layers.Dense(7, name='dense_4')(h)
final_model = keras.models.Model(
inputs=[e, f], outputs=[i, g], name='final')
self.assertEqual(len(final_model.inputs), 2)
self.assertEqual(len(final_model.outputs), 2)
self.assertEqual(len(final_model.layers), 4)
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([layer.name for layer in final_model.layers][2:],
['model', 'dense_4'])
self.assertListEqual(
model.compute_mask([e, f], [None, None]), [None, None])
self.assertListEqual(
final_model._compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
(10, 64)])
# run recursive model
fn = keras.backend.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
# test serialization
model_config = final_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
def test_multi_input_multi_output_recursion(self):
with self.test_session():
# test multi-input multi-output
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
_, n = model([j, k])
o = keras.layers.Input(shape=(32,), name='input_o')
p = keras.layers.Input(shape=(32,), name='input_p')
q, _ = model([o, p])
self.assertListEqual(n.get_shape().as_list(), [None, 5])
self.assertListEqual(q.get_shape().as_list(), [None, 64])
s = keras.layers.concatenate([n, q], name='merge_nq')
self.assertListEqual(s.get_shape().as_list(), [None, 64 + 5])
# test with single output as 1-elem list
multi_io_model = keras.models.Model([j, k, o, p], [s])
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test with single output as tensor
multi_io_model = keras.models.Model([j, k, o, p], s)
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test serialization
model_config = multi_io_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
config = model.get_config()
keras.models.Model.from_config(config)
model.summary()
json_str = model.to_json()
keras.models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
keras.models.model_from_yaml(yaml_str)
def test_invalid_graphs(self):
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
# input is not an Input tensor
j = keras.layers.Input(shape=(32,), name='input_j')
j = keras.layers.Dense(32)(j)
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k], [m, n])
# disconnected graph
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j], [m, n])
# redundant outputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
keras.models.Model([j, k], [m, n, n])
# redundant inputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k, j], [m, n])
    # I have no idea what I'm doing: garbage as inputs/outputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k], [m, n, 0])
def test_raw_tf_compatibility(self):
# test calling layers/models on TF tensors
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
tf_model = keras.models.Model([j, k], [m, n])
j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
m_tf, n_tf = tf_model([j_tf, k_tf])
self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])
self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])
# test merge
keras.layers.concatenate([j_tf, k_tf], axis=1)
keras.layers.add([j_tf, k_tf])
# test tensor input
x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
keras.layers.InputLayer(input_tensor=x)
x = keras.layers.Input(tensor=x)
keras.layers.Dense(2)(x)
def test_basic_masking(self):
a = keras.layers.Input(shape=(10, 32), name='input_a')
b = keras.layers.Masking()(a)
model = keras.models.Model(a, b)
self.assertEqual(model.output_mask.get_shape().as_list(), [None, 10])
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRU(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTM(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = keras.engine.topology.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = keras.engine.topology.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = keras.engine.topology.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
def test_layer_sharing_at_heterogenous_depth(self):
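# The shared Dense layers A and B are applied alternately (a(b(a(b(x)))));
# a get_config/from_config round trip must reproduce identical predictions.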
with self.test_session():
x_val = np.random.random((10, 5))
x = keras.Input(shape=(5,))
a = keras.layers.Dense(5, name='A')
b = keras.layers.Dense(5, name='B')
output = a(b(a(b(x))))
m = keras.models.Model(x, output)
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
with self.test_session():
input_shape = (16, 9, 3)
input_layer = keras.Input(shape=input_shape)
a = keras.layers.Dense(3, name='dense_A')
b = keras.layers.Dense(3, name='dense_B')
c = keras.layers.Dense(3, name='dense_C')
x1 = b(a(input_layer))
x2 = a(c(input_layer))
output = keras.layers.concatenate([x1, x2])
m = keras.models.Model(inputs=input_layer, outputs=output)
x_val = np.random.random((10, 16, 9, 3))
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
if __name__ == '__main__':
test.main()
| apache-2.0 | 9,215,489,870,715,391,000 | 37.415094 | 80 | 0.611984 | false |
fomars/yandex-tank | yandextank/plugins/Pandora/plugin.py | 1 | 12105 | import datetime
import logging
import subprocess
import time
import os
import shutil
from threading import Event
import yaml
from netort.resource import manager as resource_manager
from netort.resource import HttpOpener
from .reader import PandoraStatsReader
from ..Console import Plugin as ConsolePlugin
from ..Console import screen as ConsoleScreen
from ..Phantom import PhantomReader, string_to_df
from ...common.interfaces import AbstractInfoWidget, GeneratorPlugin
from ...common.util import tail_lines, FileMultiReader
logger = logging.getLogger(__name__)
class Plugin(GeneratorPlugin):
""" Pandora load generator plugin """
OPTION_CONFIG = "config"
SECTION = "pandora"
DEFAULT_REPORT_FILE = "phout.log"
DEFAULT_EXPVAR_PORT = 1234
def __init__(self, core, cfg, name):
super(Plugin, self).__init__(core, cfg, name)
self.output_finished = Event()
self.enum_ammo = False
self.pandora_cmd = None
self.pandora_config_file = None
self.config_contents = None
self.custom_config = False
self.expvar = self.get_option('expvar')
self.expvar_enabled = self.expvar
self.expvar_port = self.DEFAULT_EXPVAR_PORT
self.report_file = None
self.__address = None
self.__schedule = None
self.ammofile = None
self.process_stderr_file = None
self.resources = []
@staticmethod
def get_key():
return __file__
def get_available_options(self):
opts = [
"pandora_cmd", "buffered_seconds",
"config_content", "config_file"
]
return opts
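# Illustrative tank config section consumed by this plugin (option names are
# taken from configure() below; the values and URLs are hypothetical):
#
#   pandora:
#     pandora_cmd: https://example.com/pandora    # remote binaries are downloaded
#     config_file: pandora_config.yaml
#     resources:
#       - {src: https://example.com/ammo.gz, dst: ./ammo}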
def configure(self):
self.report_file = self.get_option("report_file")
self.buffered_seconds = self.get_option("buffered_seconds")
self.affinity = self.get_option("affinity", "")
self.resources = self.get_option("resources")
# if we use a custom pandora binary, we can download it and make it executable
self.pandora_cmd = self.get_resource(self.get_option("pandora_cmd"), "./pandora", permissions=0755)
# download all resources from self.get_options("resources")
if len(self.resources) > 0:
for resource in self.resources:
self.get_resource(resource["src"], resource["dst"])
# get config_contents and patch it: expand resources via resource manager
# the config_content option takes priority over config_file
if self.get_option("config_content"):
logger.info('Found config_content option configuration')
self.config_contents = self.__patch_raw_config_and_dump(self.get_option("config_content"))
elif self.get_option("config_file"):
logger.info('Found config_file option configuration')
with open(self.get_option("config_file"), 'rb') as config:
external_file_config_contents = yaml.load(config.read())
self.config_contents = self.__patch_raw_config_and_dump(external_file_config_contents)
else:
raise RuntimeError("Neither pandora.config_content, nor pandora.config_file specified")
logger.debug('Config after parsing for patching: %s', self.config_contents)
# find report filename and add to artifacts
self.report_file = self.__find_report_filename()
with open(self.report_file, 'w'):
pass
self.core.add_artifact_file(self.report_file)
def __patch_raw_config_and_dump(self, cfg_dict):
if not cfg_dict:
raise RuntimeError('Empty pandora config')
# patch
config_content = self.patch_config(cfg_dict)
# dump
self.pandora_config_file = self.core.mkstemp(".yaml", "pandora_config_")
self.core.add_artifact_file(self.pandora_config_file)
with open(self.pandora_config_file, 'w') as config_file:
yaml.dump(config_content, config_file)
return config_content
def patch_config(self, config):
"""
download remote resources, replace links with local filenames
add result file section
:param dict config: pandora config
"""
# get expvar parameters
if config.get("monitoring"):
if config["monitoring"].get("expvar"):
self.expvar_enabled = config["monitoring"]["expvar"].get("enabled")
if config["monitoring"]["expvar"].get("port"):
self.expvar_port = config["monitoring"]["expvar"].get("port")
# or set it up if the expvar monitoring section does not exist
elif not self.expvar:
config["monitoring"] = {
"expvar": {
"enabled": True,
}
}
self.expvar_enabled = True
# FIXME this is broken for custom ammo providers due to interface incompatibility
# FIXME refactor pandora plx
for pool in config['pools']:
if pool.get('ammo', {}).get('file', ''):
self.ammofile = pool['ammo']['file']
opener = resource_manager.get_opener(self.ammofile)
if isinstance(opener, HttpOpener):
pool['ammo']['file'] = opener.download_file(True, try_ungzip=True)
else:
pool['ammo']['file'] = opener.get_filename
if not pool.get('result') or 'phout' not in pool.get('result', {}).get('type', ''):
logger.warning('Seems like pandora result file not specified... adding defaults')
pool['result'] = dict(
destination=self.DEFAULT_REPORT_FILE,
type='phout',
)
return config
@property
def address(self):
if not self.__address:
for pool in self.config_contents['pools']:
if pool.get('gun', {}).get('target'):
self.__address = pool.get('gun', {}).get('target')
break
else:
self.__address = 'unknown'
return self.__address
@property
def schedule(self):
if not self.__schedule:
for pool in self.config_contents['pools']:
if pool.get('rps'):
self.__schedule = pool.get('rps')
break
else:
self.__schedule = 'unknown'
return self.__schedule
def get_info(self):
return self.Info(
address=self.address,
ammo_file=self.ammofile,
duration=0,
instances=0,
loop_count=0,
port=self.address.split(':')[-1],
rps_schedule=self.schedule
)
def __find_report_filename(self):
for pool in self.config_contents['pools']:
if self.report_file:
return self.report_file
if pool.get('result', {}).get('destination', None):
report_filename = pool.get('result').get('destination')
logger.info('Found report file in pandora config: %s', report_filename)
return report_filename
return self.DEFAULT_REPORT_FILE
def get_reader(self, parser=string_to_df):
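# Stream pandora's phout-format report while it is still being written; the
# output_finished event tells the reader when no more data will arrive.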
if self.reader is None:
self.reader = FileMultiReader(self.report_file, self.output_finished)
return PhantomReader(self.reader.get_file(), parser=parser)
def get_stats_reader(self):
if self.stats_reader is None:
self.stats_reader = PandoraStatsReader(self.expvar_enabled, self.expvar_port)
return self.stats_reader
def get_resource(self, resource, dst, permissions=0644):
opener = resource_manager.get_opener(resource)
if isinstance(opener, HttpOpener):
tmp_path = opener.download_file(True, try_ungzip=True)
shutil.copy(tmp_path, dst)
logger.info('Successfully moved resource %s', dst)
else:
dst = opener.get_filename
os.chmod(dst, permissions)
logger.info('Permissions on %s changed to %d', dst, permissions)
return dst
def prepare_test(self):
try:
console = self.core.get_plugin_of_type(ConsolePlugin)
except KeyError as ex:
logger.debug("Console not found: %s", ex)
console = None
if console:
widget = PandoraInfoWidget(self)
console.add_info_widget(widget)
self.core.job.aggregator.add_result_listener(widget)
def start_test(self):
args = [self.pandora_cmd] +\
(['-expvar'] if self.expvar else []) +\
[self.pandora_config_file]
if self.affinity:
self.core.__setup_affinity(self.affinity, args=args)
logger.info("Starting: %s", args)
self.start_time = time.time()
self.process_stderr_file = self.core.mkstemp(".log", "pandora_")
self.core.add_artifact_file(self.process_stderr_file)
self.process_stderr = open(self.process_stderr_file, 'w')
try:
self.process = subprocess.Popen(
args,
stderr=self.process_stderr,
stdout=self.process_stderr,
close_fds=True)
except OSError:
logger.debug(
"Unable to start Pandora binary. Args: %s", args, exc_info=True)
raise RuntimeError(
"Unable to start Pandora binary and/or file does not exist: %s" % args)
def is_test_finished(self):
retcode = self.process.poll()
if retcode is not None and retcode == 0:
logger.info("Pandora subprocess done its work successfully and finished w/ retcode 0")
self.output_finished.set()
return retcode
elif retcode is not None and retcode != 0:
lines_amount = 20
logger.info("Pandora finished with non-zero retcode. Last %s logs of Pandora log:", lines_amount)
self.output_finished.set()
last_log_contents = tail_lines(self.process_stderr_file, lines_amount)
for logline in last_log_contents:
logger.info(logline.strip('\n'))
return abs(retcode)
else:
return -1
def end_test(self, retcode):
if self.process and self.process.poll() is None:
logger.warning(
"Terminating worker process with PID %s", self.process.pid)
self.process.terminate()
if self.process_stderr:
self.process_stderr.close()
else:
logger.debug("Seems subprocess finished OK")
self.output_finished.set()
return retcode
class PandoraInfoWidget(AbstractInfoWidget):
''' Right panel widget '''
def __init__(self, pandora):
AbstractInfoWidget.__init__(self)
self.krutilka = ConsoleScreen.krutilka()
self.owner = pandora
self.reqps = 0
self.active = 0
def get_index(self):
return 0
def on_aggregated_data(self, data, stats):
self.reqps = stats["metrics"]["reqps"]
self.active = stats["metrics"]["instances"]
def render(self, screen):
text = " Pandora Test %s" % self.krutilka.next()
space = screen.right_panel_width - len(text) - 1
left_spaces = space / 2
right_spaces = space / 2
dur_seconds = int(time.time()) - int(self.owner.start_time)
duration = str(datetime.timedelta(seconds=dur_seconds))
template = screen.markup.BG_BROWN + '~' * left_spaces + \
text + ' ' + '~' * right_spaces + screen.markup.RESET + "\n"
template += "Command Line: %s\n"
template += " Duration: %s\n"
template += " Requests/s: %s\n"
template += " Active reqs: %s\n"
template += " Target: %s\n"
template += " Schedule: \n%s\n"
data = (
self.owner.pandora_cmd,
duration,
self.reqps,
self.active,
self.owner.address,
yaml.dump(self.owner.schedule)
)
return template % data
| lgpl-2.1 | -508,664,481,288,662,140 | 36.828125 | 109 | 0.582239 | false |
amyshi188/osf.io | api_tests/nodes/views/test_node_draft_registration_detail.py | 11 | 31591 | import hashlib, binascii
from nose.tools import * # flake8: noqa
from modularodm import Q
from website.models import MetaSchema
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from api.base.settings.defaults import API_BASE
from website.settings import PREREG_ADMIN_TAG
from website.project.model import ensure_schemas
from test_node_draft_registration_list import DraftRegistrationTestCase
from tests.factories import (
ProjectFactory,
DraftRegistrationFactory,
AuthUserFactory,
RegistrationFactory
)
class TestDraftRegistrationDetail(DraftRegistrationTestCase):
def setUp(self):
super(TestDraftRegistrationDetail, self).setUp()
ensure_schemas()
self.schema = MetaSchema.find_one(
Q('name', 'eq', 'OSF-Standard Pre-Data Collection Registration') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=self.schema,
branched_from=self.public_project
)
self.other_project = ProjectFactory(creator=self.user)
self.url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.draft_registration._id)
def test_admin_can_view_draft(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_equal(data['attributes']['registration_supplement'], self.schema._id)
assert_equal(data['id'], self.draft_registration._id)
assert_equal(data['attributes']['registration_metadata'], {})
def test_read_only_contributor_cannot_view_draft(self):
res = self.app.get(self.url, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_write_contributor_cannot_view_draft(self):
res = self.app.get(self.url, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_non_contributor_cannot_view_draft(self):
res = self.app.get(self.url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_view_draft(self):
res = self.app.get(self.url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_draft_must_be_branched_from_node_in_kwargs(self):
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.other_project._id, self.draft_registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors'][0]
assert_equal(errors['detail'], 'This draft registration is not created from the given node.')
def test_reviewer_can_see_draft_registration(self):
user = AuthUserFactory()
user.system_tags.append(PREREG_ADMIN_TAG)
user.save()
res = self.app.get(self.url, auth=user.auth)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_equal(data['attributes']['registration_supplement'], self.schema._id)
assert_equal(data['id'], self.draft_registration._id)
assert_equal(data['attributes']['registration_metadata'], {})
class TestDraftRegistrationUpdate(DraftRegistrationTestCase):
def setUp(self):
super(TestDraftRegistrationUpdate, self).setUp()
ensure_schemas()
self.schema = MetaSchema.find_one(
Q('name', 'eq', 'OSF-Standard Pre-Data Collection Registration') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=self.schema,
branched_from=self.public_project
)
self.prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.prereg_draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=self.prereg_schema,
branched_from=self.public_project
)
self.registration_metadata = self.prereg_metadata(self.prereg_draft_registration)
self.other_project = ProjectFactory(creator=self.user)
self.url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.draft_registration._id)
self.payload = {
"data": {
"id": self.draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
"datacompletion": {
"value": "No, data collection has not begun"
},
"looked": {
"value": "No"
},
"comments": {
"value": "This is my first registration."
}
}
}
}
}
def test_id_required_in_payload(self):
payload = {
"data": {
"type": "draft_registrations",
"attributes": {
"registration_metadata": {}
}
}
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors'][0]
assert_equal(errors['source']['pointer'], '/data/id')
assert_equal(errors['detail'], 'This field may not be null.')
def test_admin_can_update_draft(self):
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_equal(data['attributes']['registration_supplement'], self.schema._id)
assert_equal(data['attributes']['registration_metadata'], self.payload['data']['attributes']['registration_metadata'])
def test_draft_must_be_branched_from_node(self):
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.other_project._id, self.draft_registration._id)
res = self.app.put_json_api(url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors'][0]
assert_equal(errors['detail'], 'This draft registration is not created from the given node.')
def test_read_only_contributor_cannot_update_draft(self):
res = self.app.put_json_api(self.url, self.payload, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_write_contributor_cannot_update_draft(self):
res = self.app.put_json_api(self.url, self.payload, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_non_contributor_cannot_update_draft(self):
res = self.app.put_json_api(self.url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_update_draft(self):
res = self.app.put_json_api(self.url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_registration_metadata_must_be_supplied(self):
self.payload['data']['attributes'] = {}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert_equal(res.status_code, 400)
assert_equal(errors['source']['pointer'], '/data/attributes/registration_metadata')
assert_equal(errors['detail'], 'This field is required.')
def test_registration_metadata_must_be_a_dictionary(self):
self.payload['data']['attributes']['registration_metadata'] = 'Registration data'
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert_equal(res.status_code, 400)
assert_equal(errors['source']['pointer'], '/data/attributes/registration_metadata')
assert_equal(errors['detail'], 'Expected a dictionary of items but got type "unicode".')
def test_registration_metadata_question_values_must_be_dictionaries(self):
self.payload['data']['attributes']['registration_metadata']['datacompletion'] = 'No, data collection has not begun'
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert_equal(res.status_code, 400)
assert_equal(errors['detail'], "u'No, data collection has not begun' is not of type 'object'")
def test_registration_metadata_question_keys_must_be_value(self):
self.payload['data']['attributes']['registration_metadata']['datacompletion'] = {
"incorrect_key": "No, data collection has not begun"
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert_equal(res.status_code, 400)
assert_equal(errors['detail'], "Additional properties are not allowed (u'incorrect_key' was unexpected)")
def test_question_in_registration_metadata_must_be_in_schema(self):
self.payload['data']['attributes']['registration_metadata']['q11'] = {
"value": "No, data collection has not begun"
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert_equal(res.status_code, 400)
assert_equal(errors['detail'], "Additional properties are not allowed (u'q11' was unexpected)")
def test_multiple_choice_question_value_must_match_value_in_schema(self):
self.payload['data']['attributes']['registration_metadata']['datacompletion'] = {
"value": "Nope, data collection has not begun"
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert_equal(res.status_code, 400)
assert_equal(errors['detail'], "u'Nope, data collection has not begun' is not one of [u'No, data collection has not begun', u'Yes, data collection is underway or complete']")
def test_cannot_update_registration_schema(self):
self.payload['data']['attributes']['registration_supplement'] = self.prereg_schema._id
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_supplement'], self.schema._id)
def test_required_metaschema_questions_not_required_on_update(self):
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.prereg_draft_registration._id)
del self.registration_metadata['q1']
self.prereg_draft_registration.registration_metadata = self.registration_metadata
self.prereg_draft_registration.save()
payload = {
"data": {
"id": self.prereg_draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
'q2': {
'value': 'New response'
}
}
}
}
}
res = self.app.put_json_api(url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q2']['value'], 'New response')
assert_not_in('q1', res.json['data']['attributes']['registration_metadata'])
def test_reviewer_can_update_draft_registration(self):
user = AuthUserFactory()
user.system_tags.append(PREREG_ADMIN_TAG)
user.save()
payload = {
"data": {
"id": self.prereg_draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
'q2': {
'comments': [{'value': 'This is incomplete.'}]
}
}
}
}
}
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.prereg_draft_registration._id)
res = self.app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q2']['comments'][0]['value'], 'This is incomplete.')
assert_not_in('q1', res.json['data']['attributes']['registration_metadata'])
def test_reviewer_can_only_update_comment_fields_draft_registration(self):
user = AuthUserFactory()
user.system_tags.append(PREREG_ADMIN_TAG)
user.save()
payload = {
"data": {
"id": self.prereg_draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
'q2': {
'value': 'Test response'
}
}
}
}
}
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.prereg_draft_registration._id)
res = self.app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Additional properties are not allowed (u'value' was unexpected)")
def test_reviewer_can_update_nested_comment_fields_draft_registration(self):
user = AuthUserFactory()
user.system_tags.append(PREREG_ADMIN_TAG)
user.save()
payload = {
"data": {
"id": self.prereg_draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
'q7': {
'value': {
'question': {
'comments': [{'value': 'Add some clarity here.'}]
}
}
}
}
}
}
}
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.prereg_draft_registration._id)
res = self.app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q7']['value']['question']['comments'][0]['value'], 'Add some clarity here.')
def test_reviewer_cannot_update_nested_value_fields_draft_registration(self):
user = AuthUserFactory()
user.system_tags.append(PREREG_ADMIN_TAG)
user.save()
payload = {
"data": {
"id": self.prereg_draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
'q7': {
'value': {
'question': {
'value': 'This is the answer'
}
}
}
}
}
}
}
url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.prereg_draft_registration._id)
res = self.app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Additional properties are not allowed (u'value' was unexpected)")
class TestDraftRegistrationPatch(DraftRegistrationTestCase):
def setUp(self):
super(TestDraftRegistrationPatch, self).setUp()
ensure_schemas()
self.schema = MetaSchema.find_one(
Q('name', 'eq', 'OSF-Standard Pre-Data Collection Registration') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=self.schema,
branched_from=self.public_project
)
self.prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.prereg_draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=self.prereg_schema,
branched_from=self.public_project
)
self.registration_metadata = self.prereg_metadata(self.prereg_draft_registration)
self.other_project = ProjectFactory(creator=self.user)
self.url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.draft_registration._id)
self.payload = {
"data": {
"id": self.draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {
"datacompletion": {
"value": "No, data collection has not begun"
},
"looked": {
"value": "No"
},
"comments": {
"value": "This is my first registration."
}
}
}
}
}
def test_admin_can_update_draft(self):
res = self.app.patch_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_equal(data['attributes']['registration_supplement'], self.schema._id)
assert_equal(data['attributes']['registration_metadata'], self.payload['data']['attributes']['registration_metadata'])
def test_read_only_contributor_cannot_update_draft(self):
res = self.app.patch_json_api(self.url, self.payload, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_write_contributor_cannot_update_draft(self):
res = self.app.patch_json_api(self.url, self.payload, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_non_contributor_cannot_update_draft(self):
res = self.app.patch_json_api(self.url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_update_draft(self):
res = self.app.patch_json_api(self.url, self.payload, expect_errors=True)
assert_equal(res.status_code, 401)
class TestDraftRegistrationDelete(DraftRegistrationTestCase):
def setUp(self):
super(TestDraftRegistrationDelete, self).setUp()
ensure_schemas()
schema = MetaSchema.find_one(
Q('name', 'eq', 'OSF-Standard Pre-Data Collection Registration') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=schema,
branched_from=self.public_project
)
self.other_project = ProjectFactory(creator=self.user)
self.url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.draft_registration._id)
def test_admin_can_delete_draft(self):
res = self.app.delete_json_api(self.url, auth=self.user.auth)
assert_equal(res.status_code, 204)
def test_read_only_contributor_cannot_delete_draft(self):
res = self.app.delete_json_api(self.url, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_write_contributor_cannot_delete_draft(self):
res = self.app.delete_json_api(self.url, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_non_contributor_cannot_delete_draft(self):
res = self.app.delete_json_api(self.url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_delete_draft(self):
res = self.app.delete_json_api(self.url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_draft_that_has_been_registered_cannot_be_deleted(self):
reg = RegistrationFactory(project=self.public_project)
self.draft_registration.registered_node = reg
self.draft_registration.save()
res = self.app.delete_json_api(self.url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'This draft has already been registered and cannot be modified.')
def test_reviewer_cannot_delete_draft_registration(self):
user = AuthUserFactory()
user.system_tags.append(PREREG_ADMIN_TAG)
user.save()
res = self.app.delete_json_api(self.url, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
class TestDraftPreregChallengeRegistrationMetadataValidation(DraftRegistrationTestCase):
def setUp(self):
super(TestDraftPreregChallengeRegistrationMetadataValidation, self).setUp()
ensure_schemas()
self.prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
self.prereg_draft_registration = DraftRegistrationFactory(
initiator=self.user,
registration_schema=self.prereg_schema,
branched_from=self.public_project
)
self.other_project = ProjectFactory(creator=self.user)
self.url = '/{}nodes/{}/draft_registrations/{}/'.format(API_BASE, self.public_project._id, self.prereg_draft_registration._id)
self.payload = {
"data": {
"id": self.prereg_draft_registration._id,
"type": "draft_registrations",
"attributes": {
"registration_metadata": {}
}
}
}
def test_first_level_open_ended_answers(self):
self.payload['data']['attributes']['registration_metadata']['q1'] = {
"value": "This is my answer."
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q1']['value'], "This is my answer.")
def test_first_level_open_ended_answer_must_have_correct_key(self):
self.payload['data']['attributes']['registration_metadata']['q1'] = {
"values": "This is my answer."
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Additional properties are not allowed (u\'values\' was unexpected)")
def test_first_level_open_ended_answer_must_be_of_correct_type(self):
self.payload['data']['attributes']['registration_metadata']['q1'] = {
"value": 12345
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "12345 is not of type 'string'")
def test_first_level_open_ended_answer_not_expecting_more_nested_data(self):
self.payload['data']['attributes']['registration_metadata']['q1'] = {
"value": {
"question": {
"value": "This is my answer."
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "{u'question': {u'value': u'This is my answer.'}} is not of type 'string'")
def test_second_level_answers(self):
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"question": {
"value": "This is my answer."
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q7']['value']['question']['value'], 'This is my answer.')
def test_second_level_open_ended_answer_must_have_correct_key(self):
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"questions": {
"value": "This is my answer."
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Additional properties are not allowed (u\'questions\' was unexpected)")
def test_third_level_open_ended_answer_must_have_correct_key(self):
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"question": {
"values": "This is my answer."
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Additional properties are not allowed (u\'values\' was unexpected)")
def test_second_level_open_ended_answer_must_have_correct_type(self):
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"question": "This is my answer"
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "u'This is my answer' is not of type 'object'")
def test_third_level_open_ended_answer_must_have_correct_type(self):
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"question": {
"value": True
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "True is not of type 'string'")
def test_uploader_metadata(self):
sha256 = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000)
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"uploader": {
"value": "Screen Shot 2016-03-30 at 7.02.05 PM.png",
"extra": [{
"data": {},
"nodeId": self.public_project._id,
"viewUrl": "/project/{}/files/osfstorage/{}".format(self.public_project._id, self.prereg_draft_registration._id),
"selectedFileName": "Screen Shot 2016-03-30 at 7.02.05 PM.png",
"sha256": binascii.hexlify(sha256)
}]
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q7']['value']['uploader']['value'], "Screen Shot 2016-03-30 at 7.02.05 PM.png")
def test_uploader_metadata_incorrect_key(self):
sha256 = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000)
self.payload['data']['attributes']['registration_metadata']['q7'] = {
"value": {
"uploader": {
"value": "Screen Shot 2016-03-30 at 7.02.05 PM.png",
"extra": [{
"data": {},
"nodeId": self.public_project._id,
"viewUrl": "/project/{}/files/osfstorage/{}".format(self.public_project._id, self.prereg_draft_registration._id),
"selectedFileNames": "Screen Shot 2016-03-30 at 7.02.05 PM.png",
"sha256": binascii.hexlify(sha256)
}]
}
}
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Additional properties are not allowed (u\'selectedFileNames\' was unexpected)")
def test_multiple_choice_questions_incorrect_choice(self):
self.payload['data']['attributes']['registration_metadata']['q15'] = {
"value": "This is my answer."
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "u'This is my answer.' is not one of [u'No blinding is involved in this study.', "
"u'For studies that involve human subjects, they will not know the treatment group to which they have been assigned.', "
"u'Research personnel who interact directly with the study subjects (either human or non-human subjects) will not be aware of the assigned treatments.', "
"u'Research personnel who analyze the data collected from the study are not aware of the treatment applied to any given group.']")
def test_multiple_choice_questions(self):
self.payload['data']['attributes']['registration_metadata']['q15'] = {
"value": 'No blinding is involved in this study.'
}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['registration_metadata']['q15']['value'], 'No blinding is involved in this study.')
| apache-2.0 | -2,112,192,858,533,233,200 | 44.520173 | 208 | 0.588174 | false |
GiantSteps/essentia | test/src/unittest/highlevel/test_onsets.py | 10 | 3820 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestOnsets(TestCase):
def testZero(self):
# zeros should return no onsets (empty array)
n = 10
detection = zeros(100).reshape(n,n)
weights = ones(n)
self.assertEqualMatrix(Onsets()(detection, weights), [])
def testConstantInput(self):
# a constant detection function should return the first position:
n = 10
detection = ones(100).reshape(n,n)
weights = ones(n)
size = 2048
sr = 44100.0
while (size > 32):
hopsize = size/2
# TODO there will be an onset detected on the first frame for a
# non-zero constant signal, which is probably ok
frameTime = (size - hopsize)/sr
frameRate = 1.0/frameTime
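# e.g. size=1024, hopsize=512: frameTime = 512/44100 ~= 0.0116 s and
# frameRate ~= 86.13 Hz, the only rate Onsets currently supports (see below)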
if (size == 1024 and hopsize == 512):
onset = Onsets(frameRate=frameRate)(detection, weights)
self.assertAlmostEqualVector(onset, array([frameTime]))
else:
# Onsets does not support other framerates than
# (1024-512)/44100
# Onsets() outputs a warning instead of exception from now on
# self.assertConfigureFails(Onsets(), { 'frameRate': frameRate })
pass
size /= 2
def testImpulse(self):
# Given an impulse, it should return its position
n = 10
detection = zeros(100).reshape(n,n)
for i in range(len(detection)):
detection[i, 5] = 1
detection[i, 4] = .2
detection[i, 6] = .3
weights = ones(n)
size = 2048
sr = 44100.0
while (size > 32):
hopsize = size/2
frameTime = (size - hopsize)/sr
frameRate = 1.0/frameTime
if (size == 1024 and hopsize == 512):
onset = Onsets(frameRate=frameRate)(detection, weights)
self.assertAlmostEqualVector( onset, array([4*frameTime]), 1e-5)
else:
# Onsets does not support other framerates than
# (1024-512)/44100
# self.assertConfigureFails(Onsets(), { 'frameRate': frameRate })
pass # from now on Onset returns a warning instead of exception
size /= 2
def testInvalidParam(self):
self.assertConfigureFails(Onsets(), { 'frameRate':-1 })
self.assertConfigureFails(Onsets(), { 'alpha': 2 })
self.assertConfigureFails(Onsets(), { 'delay': -1 })
self.assertConfigureFails(Onsets(), { 'silenceThreshold':10 })
def testEmpty(self):
# Empty input should raise an exception
self.assertComputeFails(Onsets(), array([[]]), [])
def testBadWeightSize(self):
weights = [1,2,3,4]
input = [[1,2,3,4],
[5,6,7,8],
[9,10,11,12]]
self.assertComputeFails(Onsets(), input, weights)
suite = allTests(TestOnsets)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -1,917,014,051,743,897,600 | 35.037736 | 81 | 0.596073 | false |
UMD-DRASTIC/drastic-web | nodes/views.py | 1 | 3309 | """Node views
"""
__copyright__ = "Copyright (C) 2016 University of Maryland"
__license__ = "GNU AFFERO GENERAL PUBLIC LICENSE, Version 3"
import uuid
import datetime
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import NodeForm
from .client import NodeClient
from drastic.models import Node
from drastic.models.errors import NodeConflictError
import logging
logger = logging.getLogger("drastic")
@login_required
def home(request):
nodes = [n.to_dict() for n in Node.list()]
return render(request, 'nodes/index.html', {"nodes": nodes})
@login_required
def new(request):
form = NodeForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
try:
Node.create(name=form.cleaned_data["name"],
address=form.cleaned_data["address"])
messages.add_message(request, messages.INFO, 'New node was added')
except NodeConflictError:
messages.add_message(request, messages.ERROR, 'That name is already in use')
return HttpResponseRedirect(reverse('nodes:home'))
return render(request, 'nodes/new.html', {'form': form})
@login_required
def edit(request, id):
# TODO: Create the initial_data from the node itself, if we can
# find it.
node = Node.find_by_id(id)
initial_data = node.to_dict()
if request.method == 'POST':
form = NodeForm(request.POST)
if form.is_valid():
node.update(name=form.cleaned_data['name'], address=form.cleaned_data['address'])
messages.add_message(request, messages.INFO,
"Node information for '{}' has been changed".format(form.cleaned_data['name']))
return HttpResponseRedirect(reverse('nodes:home'))
else:
form = NodeForm(initial=initial_data)
return render(request, 'nodes/edit.html', {'form': form})
@login_required
def check(request, id):
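# Query the node's agent on port 9000 and record whether it is reachable,
# marking the node UP or DOWN together with the time of this check.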
node = Node.find_by_id(id)
client = NodeClient(node.address + ":9000")
ok, metrics = client.get_state()
if ok:
node.update(status="UP", last_update=datetime.datetime.now())
messages.add_message(request, messages.INFO, 'The node was reachable')
else:
messages.add_message(request, messages.WARNING, 'The node at {} was unreachable'.format(node.address))
node.update(status="DOWN", last_update=datetime.datetime.now())
return HttpResponseRedirect(reverse("nodes:home"))
@login_required
def metrics(request, id):
node = Node.find_by_id(id)
if not node or not request.user.administrator:
raise PermissionDenied()
client = NodeClient(node.address + ":9000")
ok, metrics = client.get_state()
if not ok:
messages.add_message(request, messages.WARNING, 'The node at {} was unreachable'.format(node.address))
return render(request, 'nodes/metrics.html', { "node": node, "metrics": metrics})
@login_required
def logview(request, id):
node = Node.find_by_id(id)
return render(request, 'nodes/logs.html', { "node": node})
| agpl-3.0 | 1,164,303,273,820,435,500 | 32.09 | 112 | 0.669387 | false |
WPMedia/dd-agent | agent.py | 3 | 18251 | #!/opt/datadog-agent/embedded/bin/python
"""
Datadog
www.datadoghq.com
----
Cloud-Scale Monitoring. Monitoring that tracks your dynamic infrastructure.
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2016 all rights reserved
"""
# set up logging before importing any other components
from config import get_version, initialize_logging # noqa
initialize_logging('collector')
# stdlib
import logging
import os
import signal
import sys
import time
from copy import copy
# For pickle & PID files, see issue 293
os.umask(022)
# project
from checks.check_status import CollectorStatus
from checks.collector import Collector
from config import (
get_config,
get_parsed_args,
get_system_stats,
load_check_directory,
load_check
)
from daemon import AgentSupervisor, Daemon
from emitter import http_emitter
# utils
from util import Watchdog
from utils.cloud_metadata import EC2
from utils.configcheck import configcheck, sd_configcheck
from utils.flare import Flare
from utils.hostname import get_hostname
from utils.jmx import jmx_command
from utils.pidfile import PidFile
from utils.profile import AgentProfiler
from utils.service_discovery.config_stores import get_config_store
from utils.service_discovery.sd_backend import get_sd_backend
# Constants
PID_NAME = "dd-agent"
PID_DIR = None
WATCHDOG_MULTIPLIER = 10
RESTART_INTERVAL = 4 * 24 * 60 * 60 # Defaults to 4 days
DEFAULT_COLLECTOR_PROFILE_INTERVAL = 20
# Globals
log = logging.getLogger('collector')
class Agent(Daemon):
"""
The agent class is a daemon that runs the collector in a background process.
"""
def __init__(self, pidfile, autorestart, start_event=True, in_developer_mode=False):
Daemon.__init__(self, pidfile, autorestart=autorestart)
self.run_forever = True
self.collector = None
self.start_event = start_event
self.in_developer_mode = in_developer_mode
self._agentConfig = {}
self._checksd = []
self.collector_profile_interval = DEFAULT_COLLECTOR_PROFILE_INTERVAL
self.check_frequency = None
# this flag can be set to True, False, or a set of check names (for partial reload)
self.reload_configs_flag = False
self.sd_backend = None
def _handle_sigterm(self, signum, frame):
"""Handles SIGTERM and SIGINT, which gracefully stops the agent."""
log.debug("Caught sigterm. Stopping run loop.")
self.run_forever = False
if self.collector:
self.collector.stop()
log.debug("Collector is stopped.")
def _handle_sigusr1(self, signum, frame):
"""Handles SIGUSR1, which signals an exit with an autorestart."""
self._handle_sigterm(signum, frame)
self._do_restart()
def _handle_sighup(self, signum, frame):
"""Handles SIGHUP, which signals a configuration reload."""
log.info("SIGHUP caught! Scheduling configuration reload before next collection run.")
self.reload_configs_flag = True
def reload_configs(self, checks_to_reload=set()):
"""Reload the agent configuration and checksd configurations.
Can also reload only an explicit set of checks."""
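# e.g. reload_configs(checks_to_reload={'nginx'}) refreshes only that check
# (the check name here is purely illustrative)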
log.info("Attempting a configuration reload...")
hostname = get_hostname(self._agentConfig)
# if no check was given, reload them all
if not checks_to_reload:
log.debug("No check list was passed, reloading every check")
# stop checks
for check in self._checksd.get('initialized_checks', []):
check.stop()
self._checksd = load_check_directory(self._agentConfig, hostname)
else:
new_checksd = copy(self._checksd)
self.refresh_specific_checks(hostname, new_checksd, checks_to_reload)
# once the reload is done, replace existing checks with the new ones
self._checksd = new_checksd
# Logging
num_checks = len(self._checksd['initialized_checks'])
if num_checks > 0:
opt_msg = " (refreshed %s checks)" % len(checks_to_reload) if checks_to_reload else ''
msg = "Check reload was successful. Running {num_checks} checks{opt_msg}.".format(
num_checks=num_checks, opt_msg=opt_msg)
log.info(msg)
else:
log.info("No checksd configs found")
def refresh_specific_checks(self, hostname, checksd, checks):
"""take a list of checks and for each of them:
- remove it from the init_failed_checks if it was there
- load a fresh config for it
- replace its old config with the new one in initialized_checks if there was one
- disable the check if no new config was found
- otherwise, append it to initialized_checks
"""
for check_name in checks:
idx = None
for num, check in enumerate(checksd['initialized_checks']):
if check.name == check_name:
idx = num
# stop the existing check before reloading it
check.stop()
if idx is None and check_name in checksd['init_failed_checks']:
# if the check previously failed to load, pop it from init_failed_checks
checksd['init_failed_checks'].pop(check_name)
fresh_check = load_check(self._agentConfig, hostname, check_name)
# this is an error dict
# checks that failed to load are added to init_failed_checks
# and popped from initialized_checks
if isinstance(fresh_check, dict) and 'error' in fresh_check.keys():
checksd['init_failed_checks'][fresh_check.keys()[0]] = fresh_check.values()[0]
if idx is not None:
checksd['initialized_checks'].pop(idx)
elif not fresh_check:
# no instance left of it to monitor so the check was not loaded
if idx is not None:
checksd['initialized_checks'].pop(idx)
# the check was not previously running so we were trying to instantiate it and it failed
else:
log.error("Configuration for check %s was not found, it won't be reloaded." % check_name)
# successfully reloaded check are added to initialized_checks
# (appended or replacing a previous version)
else:
if idx is not None:
checksd['initialized_checks'][idx] = fresh_check
# it didn't exist before and doesn't need to be replaced so we append it
else:
checksd['initialized_checks'].append(fresh_check)
@classmethod
def info(cls, verbose=None):
logging.getLogger().setLevel(logging.ERROR)
return CollectorStatus.print_latest_status(verbose=verbose)
def run(self, config=None):
"""Main loop of the collector"""
# Gracefully exit on sigterm.
signal.signal(signal.SIGTERM, self._handle_sigterm)
# A SIGUSR1 signals an exit with an autorestart
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
# A SIGHUP signals a configuration reload
signal.signal(signal.SIGHUP, self._handle_sighup)
# Save the agent start-up stats.
CollectorStatus().persist()
# Initialize the collector.
if not config:
config = get_config(parse_args=True)
self._agentConfig = self._set_agent_config_hostname(config)
hostname = get_hostname(self._agentConfig)
systemStats = get_system_stats(
proc_path=self._agentConfig.get('procfs_path', '/proc').rstrip('/')
)
emitters = self._get_emitters()
# Initialize service discovery
if self._agentConfig.get('service_discovery'):
self.sd_backend = get_sd_backend(self._agentConfig)
# Load the checks.d checks
self._checksd = load_check_directory(self._agentConfig, hostname)
# Initialize the Collector
self.collector = Collector(self._agentConfig, emitters, systemStats, hostname)
# In developer mode, the number of runs to be included in a single collector profile
try:
self.collector_profile_interval = int(
self._agentConfig.get('collector_profile_interval', DEFAULT_COLLECTOR_PROFILE_INTERVAL))
except ValueError:
log.warn('collector_profile_interval is invalid. '
'Using default value instead (%s).' % DEFAULT_COLLECTOR_PROFILE_INTERVAL)
self.collector_profile_interval = DEFAULT_COLLECTOR_PROFILE_INTERVAL
# Configure the watchdog.
self.check_frequency = int(self._agentConfig['check_freq'])
watchdog = self._get_watchdog(self.check_frequency)
# Initialize the auto-restarter
self.restart_interval = int(self._agentConfig.get('restart_interval', RESTART_INTERVAL))
self.agent_start = time.time()
profiled = False
collector_profiled_runs = 0
# Run the main loop.
while self.run_forever:
# Setup profiling if necessary
if self.in_developer_mode and not profiled:
try:
profiler = AgentProfiler()
profiler.enable_profiling()
profiled = True
except Exception as e:
log.warn("Cannot enable profiler: %s" % str(e))
if self.reload_configs_flag:
if isinstance(self.reload_configs_flag, set):
self.reload_configs(checks_to_reload=self.reload_configs_flag)
else:
self.reload_configs()
# Do the work. Pass `configs_reloaded` to let the collector know if it needs to
# look for the AgentMetrics check and pop it out.
self.collector.run(checksd=self._checksd,
start_event=self.start_event,
configs_reloaded=True if self.reload_configs_flag else False)
self.reload_configs_flag = False
# Look for change in the config template store.
# The self.sd_backend.reload_check_configs flag is set
# to True if a config reload is needed.
if self._agentConfig.get('service_discovery') and self.sd_backend and \
not self.sd_backend.reload_check_configs:
try:
self.sd_backend.reload_check_configs = get_config_store(
self._agentConfig).crawl_config_template()
except Exception as e:
log.warn('Something went wrong while looking for config template changes: %s' % str(e))
# Check if we should run service discovery
# The `reload_check_configs` flag can be set through the docker_daemon check or
# using ConfigStore.crawl_config_template
if self._agentConfig.get('service_discovery') and self.sd_backend and \
self.sd_backend.reload_check_configs:
self.reload_configs_flag = self.sd_backend.reload_check_configs
self.sd_backend.reload_check_configs = False
if profiled:
if collector_profiled_runs >= self.collector_profile_interval:
try:
profiler.disable_profiling()
profiled = False
collector_profiled_runs = 0
except Exception as e:
log.warn("Cannot disable profiler: %s" % str(e))
# Check if we should restart.
if self.autorestart and self._should_restart():
self._do_restart()
# Only plan for next loop if we will continue, otherwise exit quickly.
if self.run_forever:
if watchdog:
watchdog.reset()
if profiled:
collector_profiled_runs += 1
log.debug("Sleeping for {0} seconds".format(self.check_frequency))
time.sleep(self.check_frequency)
# Now clean-up.
try:
CollectorStatus.remove_latest_status()
except Exception:
pass
# Explicitly kill the process, because it might be running as a daemon.
log.info("Exiting. Bye bye.")
sys.exit(0)
def _get_emitters(self):
return [http_emitter]
def _get_watchdog(self, check_freq):
watchdog = None
if self._agentConfig.get("watchdog", True):
watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER,
max_mem_mb=self._agentConfig.get('limit_memory_consumption', None))
watchdog.reset()
return watchdog
def _set_agent_config_hostname(self, agentConfig):
# Try to fetch the instance id from EC2 if no hostname has been set
# in the config file.
# DEPRECATED
if agentConfig.get('hostname') is None and agentConfig.get('use_ec2_instance_id'):
instanceId = EC2.get_instance_id(agentConfig)
if instanceId is not None:
log.info("Running on EC2, instanceId: %s" % instanceId)
agentConfig['hostname'] = instanceId
else:
log.info('Not running on EC2, using hostname to identify this server')
return agentConfig
def _should_restart(self):
if time.time() - self.agent_start > self.restart_interval:
return True
return False
def _do_restart(self):
log.info("Running an auto-restart.")
if self.collector:
self.collector.stop()
sys.exit(AgentSupervisor.RESTART_EXIT_STATUS)
def main():
options, args = get_parsed_args()
agentConfig = get_config(options=options)
autorestart = agentConfig.get('autorestart', False)
hostname = get_hostname(agentConfig)
in_developer_mode = agentConfig.get('developer_mode')
COMMANDS_AGENT = [
'start',
'stop',
'restart',
'status',
'foreground',
]
COMMANDS_NO_AGENT = [
'info',
'check',
'configcheck',
'jmx',
'flare',
]
COMMANDS = COMMANDS_AGENT + COMMANDS_NO_AGENT
if len(args) < 1:
sys.stderr.write("Usage: %s %s\n" % (sys.argv[0], "|".join(COMMANDS)))
return 2
command = args[0]
if command not in COMMANDS:
sys.stderr.write("Unknown command: %s\n" % command)
return 3
# TODO: actually kill the start/stop/restart/status command for 5.11
if command in ['start', 'stop', 'restart', 'status'] and not in_developer_mode:
logging.error('Please use supervisor to manage the agent')
return 1
if command in COMMANDS_AGENT:
agent = Agent(PidFile(PID_NAME, PID_DIR).get_path(), autorestart, in_developer_mode=in_developer_mode)
if 'start' == command:
log.info('Start daemon')
agent.start()
elif 'stop' == command:
log.info('Stop daemon')
agent.stop()
elif 'restart' == command:
log.info('Restart daemon')
agent.restart()
elif 'status' == command:
agent.status()
elif 'info' == command:
return Agent.info(verbose=options.verbose)
elif 'foreground' == command:
log.info('Agent version %s' % get_version())
if autorestart:
# Set-up the supervisor callbacks and fork it.
logging.info('Running Agent with auto-restart ON')
def child_func():
agent.start(foreground=True)
def parent_func():
agent.start_event = False
AgentSupervisor.start(parent_func, child_func)
else:
# Run in the standard foreground.
agent.start(foreground=True)
elif 'check' == command:
if len(args) < 2:
sys.stderr.write(
"Usage: %s check <check_name> [check_rate]\n"
"Add check_rate as last argument to compute rates\n"
% sys.argv[0]
)
return 1
check_name = args[1]
try:
import checks.collector
# Try the old-style check first
print getattr(checks.collector, check_name)(log).check(agentConfig)
except Exception:
# If not an old-style check, try checks.d
checks = load_check_directory(agentConfig, hostname)
for check in checks['initialized_checks']:
if check.name == check_name:
if in_developer_mode:
check.run = AgentProfiler.wrap_profiling(check.run)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
if len(args) == 3 and args[2] == 'check_rate':
print "Running 2nd iteration to capture rate metrics"
time.sleep(1)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
check.stop()
elif 'configcheck' == command or 'configtest' == command:
configcheck()
sd_configcheck(agentConfig)
elif 'jmx' == command:
jmx_command(args[1:], agentConfig)
elif 'flare' == command:
Flare.check_user_rights()
case_id = int(args[1]) if len(args) > 1 else None
f = Flare(True, case_id)
f.collect()
try:
f.upload()
except Exception as e:
print 'The upload failed:\n{0}'.format(str(e))
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except StandardError:
# Try our best to log the error.
try:
log.exception("Uncaught error running the Agent")
except Exception:
pass
raise
| bsd-3-clause | -6,912,382,500,656,276,000 | 35.796371 | 110 | 0.596351 | false |
KostasLifeboy/RyzomCore | nel/tools/build_gamedata/processes/tiles/1_export.py | 3 | 3219 | #!/usr/bin/python
#
# \file 1_export.py
# \brief Export tiles
# \date 2009-03-10-21-31-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Export tiles
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Export tiles")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
# Find tools
TgaToDds = findTool(log, ToolDirectories, TgaToDdsTool, ToolSuffix)
ExecTimeout = findTool(log, ToolDirectories, ExecTimeoutTool, ToolSuffix)
printLog(log, "")
# For each tiles directory
printLog(log, ">>> Export tiles as DDS <<<")
if TgaToDds == "":
toolLogFail(log, TgaToDdsTool, ToolSuffix)
elif ExecTimeout == "":
toolLogFail(log, ExecTimeoutTool, ToolSuffix)
else:
mkPath(log, ExportBuildDirectory + "/" + TilesExportDirectory)
for dir in TilesSourceDirectories:
mkPath(log, DatabaseDirectory + "/" + dir)
files = findFiles(log, DatabaseDirectory + "/" + dir, "", ".tga")
for file in files:
sourceFile = DatabaseDirectory + "/" + dir + "/" + file
destFile = ExportBuildDirectory + "/" + TilesExportDirectory + "/" + os.path.basename(file)[0:-len(".tga")] + ".dds"
if needUpdateLogRemoveDest(log, sourceFile, destFile):
subprocess.call([ ExecTimeout, str(MapsBuildTimeout), TgaToDds, sourceFile, "-o", destFile, "-a", "5", "-m" ])
files = findFiles(log, DatabaseDirectory + "/" + dir, "", ".png")
for file in files:
sourceFile = DatabaseDirectory + "/" + dir + "/" + file
destFile = ExportBuildDirectory + "/" + TilesExportDirectory + "/" + os.path.basename(file)[0:-len(".png")] + ".dds"
if needUpdateLogRemoveDest(log, sourceFile, destFile):
subprocess.call([ ExecTimeout, str(MapsBuildTimeout), TgaToDds, sourceFile, "-o", destFile, "-a", "5", "-m" ])
#printLog(log, ">>> Copy PNG tiles <<<")
#mkPath(log, ExportBuildDirectory + "/" + TilesExportDirectory)
#for dir in TilesSourceDirectories:
# mkPath(log, DatabaseDirectory + "/" + dir)
# copyFilesExtNoTreeIfNeeded(log, DatabaseDirectory + "/" + dir, ExportBuildDirectory + "/" + TilesExportDirectory, ".png")
#printLog(log, "")
log.close()
# end of file
| agpl-3.0 | 5,192,780,766,586,293,000 | 37.321429 | 123 | 0.699596 | false |
alessandrod/twiggy | twiggy/logger.py | 1 | 9580 | from .message import Message
from .lib import iso8601time
import twiggy as _twiggy
import levels
import outputs
import formats
import warnings
import sys
import time
import traceback
from functools import wraps
def emit(level):
"""a decorator that emits at `level <.LogLevel>` after calling the method. The method
should return a `.Logger` instance.
For convenience, decorators for the various levels are available as
``emit.debug``, ``emit.info``, etc..
"""
def decorator(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
f(self, *args, **kwargs)._emit(level, '', [], {})
return wrapper
return decorator
emit.debug = emit(levels.DEBUG)
emit.info = emit(levels.INFO)
emit.notice = emit(levels.NOTICE)
emit.warning = emit(levels.WARNING)
emit.error = emit(levels.ERROR)
emit.critical = emit(levels.CRITICAL)
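# Note: a method decorated with one of these (e.g. ``emit.info``) must return a
# Logger; the wrapper then emits an empty message at that level on the returned
# logger, as Logger.struct() does below.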
class BaseLogger(object):
"""Base class for loggers"""
__slots__ = ['_fields', '_options', 'min_level']
__valid_options = set(Message._default_options)
def __init__(self, fields = None, options = None, min_level = None):
"""Constructor for internal module use only, basically.
``fields`` and ``options`` will be copied.
"""
self._fields = fields.copy() if fields is not None else {}
self._options = options.copy() if options is not None else Message._default_options.copy()
self.min_level = min_level if min_level is not None else levels.DEBUG
def _clone(self):
return self.__class__(fields = self._fields, options = self._options, min_level = self.min_level)
def _emit(self, level, format_spec, args, kwargs):
raise NotImplementedError
## The Magic
def fields(self, **kwargs):
"""bind fields for structured logging"""
return self.fields_dict(kwargs)
def fields_dict(self, d):
"""bind fields for structured logging.
Use this instead of `.fields` if you have keys which are not valid Python identifiers.
"""
clone = self._clone()
clone._fields.update(d)
return clone
def options(self, **kwargs):
"""bind option for message creation."""
bad_options = set(kwargs) - self.__valid_options
if bad_options:
raise ValueError("Invalid options {0!r}".format(tuple(bad_options)))
clone = self._clone()
clone._options.update(kwargs)
return clone
## Convenience
def trace(self, trace='error'):
"""convenience method to enable traceback logging"""
return self.options(trace=trace)
def name(self, name):
"""convenvience method to bind ``name`` field"""
return self.fields(name=name)
## Do something
def debug(self, format_spec = '', *args, **kwargs):
"""Emit at ``DEBUG`` level"""
self._emit(levels.DEBUG, format_spec, args, kwargs)
def info(self, format_spec = '', *args, **kwargs):
"""Emit at ``INFO`` level"""
self._emit(levels.INFO, format_spec, args, kwargs)
def notice(self, format_spec = '', *args, **kwargs):
"""Emit at ``NOTICE`` level"""
self._emit(levels.NOTICE, format_spec, args, kwargs)
return True
def warning(self, format_spec = '', *args, **kwargs):
"""Emit at ``WARNING`` level"""
self._emit(levels.WARNING, format_spec, args, kwargs)
def error(self, format_spec = '', *args, **kwargs):
"""Emit at ``ERROR`` level"""
self._emit(levels.ERROR, format_spec, args, kwargs)
def critical(self, format_spec = '', *args, **kwargs):
"""Emit at ``CRITICAL`` level"""
self._emit(levels.CRITICAL, format_spec, args, kwargs)
class InternalLogger(BaseLogger):
"""Special-purpose logger for internal uses. Sends messages directly to output, bypassing :data:`.emitters`.
:ivar `Output` output: an output to write to
"""
__slots__ = ['output']
def __init__(self, output, fields = None, options = None, min_level = None):
super(InternalLogger, self).__init__(fields, options, min_level)
self.output = output
def _clone(self):
return self.__class__(fields = self._fields, options = self._options,
min_level = self.min_level, output = self.output)
def _emit(self, level, format_spec, args, kwargs):
"""does work of emitting - for internal use"""
if level < self.min_level: return
try:
try:
msg = Message(level, format_spec, self._fields.copy(), self._options.copy(), args, kwargs)
except StandardError:
msg = None
raise
else:
self.output.output(msg)
except StandardError:
            print>>sys.stderr, iso8601time(), "Error in twiggy internal log! Something is seriously broken."
print>>sys.stderr, "Offending message:", repr(msg)
traceback.print_exc(file = sys.stderr)
class Logger(BaseLogger):
"""Logger for end-users"""
__slots__ = ['_emitters', 'filter']
def _feature_noop(self, *args, **kwargs):
return self._clone()
@classmethod
def addFeature(cls, func, name=None):
"""add a feature to the class
:arg func: the function to add
:arg string name: the name to add it under. If None, use the function's name.
"""
warnings.warn("Use of features is currently discouraged, pending refactoring", RuntimeWarning)
name = name if name is not None else func.__name__
setattr(cls, name, func)
@classmethod
def disableFeature(cls, name):
"""disable a feature.
A method will still exist by this name, but it won't do anything.
:arg string name: the name of the feature to disable.
"""
warnings.warn("Use of features is currently discouraged, pending refactoring", RuntimeWarning)
# get func directly from class dict - we don't want an unbound method.
setattr(cls, name, cls.__dict__['_feature_noop'])
@classmethod
def delFeature(cls, name):
"""delete a feature entirely
:arg string name: the name of the feature to remove
"""
warnings.warn("Use of features is currently discouraged, pending refactoring", RuntimeWarning)
delattr(cls, name)
def __init__(self, fields = None, options = None, emitters = None,
min_level = None, filter = None):
super(Logger, self).__init__(fields, options, min_level)
#: a dict of emitters
self._emitters = emitters if emitters is not None else {}
self.filter = filter if filter is not None else lambda format_spec: True
def _clone(self):
"""return a new Logger instance with copied attributes
Probably only for internal use.
"""
return self.__class__(fields = self._fields, options = self._options,
emitters = self._emitters, min_level = self.min_level,
filter = self.filter)
@emit.info
def struct(self, **kwargs):
"""convenience method for structured logging.
Calls fields() and emits at INFO
"""
return self.fields(**kwargs)
@emit.info
def struct_dict(self, d):
"""convenience method for structured logging.
Use instead of struct() if you have keys which are not valid Python identifiers
"""
return self.fields_dict(d)
## Boring stuff
def _emit(self, level, format_spec, args, kwargs):
"""does the work of emitting - for internal use"""
# XXX should these traps be collapsed?
if level < self.min_level: return
try:
if not self.filter(format_spec): return
except StandardError:
_twiggy.internal_log.info("Error in Logger filtering with {0} on {1}", repr(self.filter), format_spec)
# just continue emitting in face of filter error
# XXX should we trap here too b/c of "Dictionary changed size during iteration" (or other rare errors?)
potential_emitters = [(name, emitter) for name, emitter in self._emitters.iteritems()
if level >= emitter.min_level]
if not potential_emitters: return
try:
msg = Message(level, format_spec, self._fields.copy(), self._options.copy(), args, kwargs)
except StandardError:
# XXX use .fields() instead?
_twiggy.internal_log.info("Error formatting message level: {0!r}, format: {1!r}, fields: {2!r}, "\
"options: {3!r}, args: {4!r}, kwargs: {5!r}",
level, format_spec, self._fields, self._options, args, kwargs)
return
outputs = set()
# sort to make things deterministic (for tests, mainly)
for name, emitter in sorted(potential_emitters):
try:
include = emitter.filter(msg)
except StandardError:
_twiggy.internal_log.info("Error filtering with emitter {}. Filter: {} Message: {!r}",
name, repr(emitter.filter), msg)
include = True # output anyway if error
if include: outputs.add(emitter._output)
for o in outputs:
try:
o.output(msg)
except StandardError:
_twiggy.internal_log.warning("Error outputting with {0!r}. Message: {1!r}", o, msg)
| bsd-3-clause | -4,509,792,448,662,158,300 | 34.746269 | 114 | 0.595616 | false |
pozetroninc/micropython | ports/nrf/boards/make-pins.py | 6 | 14336 | #!/usr/bin/env python
"""Creates the pin file for the nRF5."""
from __future__ import print_function
import argparse
import sys
import csv
SUPPORTED_FN = {"UART": ["RX", "TX", "CTS", "RTS"]}
def parse_pin(name_str):
"""Parses a string and returns a pin-num."""
if len(name_str) < 1:
raise ValueError("Expecting pin name to be at least 4 charcters.")
if name_str[0] != "P":
raise ValueError("Expecting pin name to start with P")
pin_str = name_str[1:].split("/")[0]
if not pin_str.isdigit():
raise ValueError("Expecting numeric pin number.")
return int(pin_str)
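# Split a trailing decimal suffix off a name, e.g. "UART1" -> ("UART", 1);
# returns (name, None) when there is no numeric suffix.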
def split_name_num(name_num):
num = None
for num_idx in range(len(name_num) - 1, -1, -1):
if not name_num[num_idx].isdigit():
name = name_num[0 : num_idx + 1]
num_str = name_num[num_idx + 1 :]
if len(num_str) > 0:
num = int(num_str)
break
return name, num
class AlternateFunction(object):
"""Holds the information associated with a pins alternate function."""
def __init__(self, idx, af_str):
self.idx = idx
self.af_str = af_str
self.func = ""
self.fn_num = None
self.pin_type = ""
self.supported = False
af_words = af_str.split("_", 1)
self.func, self.fn_num = split_name_num(af_words[0])
if len(af_words) > 1:
self.pin_type = af_words[1]
if self.func in SUPPORTED_FN:
pin_types = SUPPORTED_FN[self.func]
if self.pin_type in pin_types:
self.supported = True
def is_supported(self):
return self.supported
def ptr(self):
"""Returns the numbered function (i.e. USART6) for this AF."""
if self.fn_num is None:
return self.func
return "{:s}{:d}".format(self.func, self.fn_num)
def mux_name(self):
return "AF{:d}_{:s}".format(self.idx, self.ptr())
def print(self):
"""Prints the C representation of this AF."""
if self.supported:
print(" AF", end="")
else:
print(" //", end="")
fn_num = self.fn_num
if fn_num is None:
fn_num = 0
print(
"({:2d}, {:8s}, {:2d}, {:10s}, {:8s}), // {:s}".format(
self.idx, self.func, fn_num, self.pin_type, self.ptr(), self.af_str
)
)
def qstr_list(self):
return [self.mux_name()]
class Pin(object):
"""Holds the information associated with a pin."""
def __init__(self, pin):
self.pin = pin
self.alt_fn = []
self.alt_fn_count = 0
self.adc_num = 0
self.adc_channel = 0
self.board_pin = False
self.board_index = None
def cpu_pin_name(self):
return "{:s}{:d}".format("P", self.pin)
def is_board_pin(self):
return self.board_pin
def set_is_board_pin(self):
self.board_pin = True
def set_board_index(self, index):
self.board_index = index
def parse_adc(self, adc_str):
if adc_str[:3] != "ADC":
return
(adc, channel) = adc_str.split("_")
for idx in range(3, len(adc)):
self.adc_num = int(adc[idx])
self.adc_channel = int(channel[2:])
def parse_af(self, af_idx, af_strs_in):
if len(af_strs_in) == 0:
return
# If there is a slash, then the slash separates 2 aliases for the
# same alternate function.
af_strs = af_strs_in.split("/")
for af_str in af_strs:
alt_fn = AlternateFunction(af_idx, af_str)
self.alt_fn.append(alt_fn)
if alt_fn.is_supported():
self.alt_fn_count += 1
def alt_fn_name(self, null_if_0=False):
if null_if_0 and self.alt_fn_count == 0:
return "NULL"
return "pin_{:s}_af".format(self.cpu_pin_name())
def adc_num_str(self):
str = ""
for adc_num in range(1, 4):
if self.adc_num & (1 << (adc_num - 1)):
if len(str) > 0:
str += " | "
str += "PIN_ADC"
str += chr(ord("0") + adc_num)
if len(str) == 0:
str = "0"
return str
def print_const_table_entry(self):
print(
" PIN({:d}, {:s}, {:s}, {:d}),".format(
self.pin, self.alt_fn_name(null_if_0=True), self.adc_num_str(), self.adc_channel
)
)
def print(self):
if self.alt_fn_count == 0:
print("// ", end="")
print("const pin_af_obj_t {:s}[] = {{".format(self.alt_fn_name()))
for alt_fn in self.alt_fn:
alt_fn.print()
if self.alt_fn_count == 0:
print("// ", end="")
print("};")
print("")
print(
"const pin_obj_t pin_{:s} = PIN({:d}, {:s}, {:s}, {:d});".format(
self.cpu_pin_name(),
self.pin,
self.alt_fn_name(null_if_0=True),
self.adc_num_str(),
self.adc_channel,
)
)
print("")
def print_header(self, hdr_file):
hdr_file.write("extern const pin_obj_t pin_{:s};\n".format(self.cpu_pin_name()))
if self.alt_fn_count > 0:
hdr_file.write(
"extern const pin_af_obj_t pin_{:s}_af[];\n".format(self.cpu_pin_name())
)
def qstr_list(self):
result = []
for alt_fn in self.alt_fn:
if alt_fn.is_supported():
result += alt_fn.qstr_list()
return result
class NamedPin(object):
def __init__(self, name, pin):
self._name = name
self._pin = pin
def pin(self):
return self._pin
def name(self):
return self._name
class Pins(object):
def __init__(self):
self.cpu_pins = [] # list of NamedPin objects
self.board_pins = [] # list of NamedPin objects
def find_pin(self, pin_num):
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.pin == pin_num:
return pin
def parse_af_file(self, filename, pinname_col, af_col, af_col_end):
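        """Parse the alternate-function CSV: column pinname_col holds the pin
        name, columns af_col..af_col_end-1 hold alternate functions, and column
        af_col_end holds the ADC mapping."""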
with open(filename, "r") as csvfile:
rows = csv.reader(csvfile)
for row in rows:
try:
pin_num = parse_pin(row[pinname_col])
except:
continue
pin = Pin(pin_num)
for af_idx in range(af_col, len(row)):
if af_idx < af_col_end:
pin.parse_af(af_idx - af_col, row[af_idx])
elif af_idx == af_col_end:
pin.parse_adc(row[af_idx])
self.cpu_pins.append(NamedPin(pin.cpu_pin_name(), pin))
def parse_board_file(self, filename):
with open(filename, "r") as csvfile:
rows = csv.reader(csvfile)
for row in rows:
try:
pin_num = parse_pin(row[1])
except:
continue
pin = self.find_pin(pin_num)
if pin:
pin.set_is_board_pin()
self.board_pins.append(NamedPin(row[0], pin))
def print_named(self, label, named_pins):
print(
"STATIC const mp_rom_map_elem_t pin_{:s}_pins_locals_dict_table[] = {{".format(label)
)
for named_pin in named_pins:
pin = named_pin.pin()
if pin.is_board_pin():
print(
" {{ MP_ROM_QSTR(MP_QSTR_{:s}), MP_ROM_PTR(&machine_board_pin_obj[{:d}]) }},".format(
named_pin.name(), pin.board_index
)
)
print("};")
print(
"MP_DEFINE_CONST_DICT(pin_{:s}_pins_locals_dict, pin_{:s}_pins_locals_dict_table);".format(
label, label
)
)
def print_const_table(self):
num_board_pins = 0
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
pin.set_board_index(num_board_pins)
num_board_pins += 1
print("")
print("const uint8_t machine_pin_num_of_board_pins = {:d};".format(num_board_pins))
print("")
print("const pin_obj_t machine_board_pin_obj[{:d}] = {{".format(num_board_pins))
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
pin.print_const_table_entry()
print("};")
def print(self):
self.print_named("cpu", self.cpu_pins)
print("")
self.print_named("board", self.board_pins)
def print_adc(self, adc_num):
print("")
print("const pin_obj_t * const pin_adc{:d}[] = {{".format(adc_num))
for channel in range(16):
adc_found = False
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if (
pin.is_board_pin()
and (pin.adc_num & (1 << (adc_num - 1)))
and (pin.adc_channel == channel)
):
print(" &pin_{:s}, // {:d}".format(pin.cpu_pin_name(), channel))
adc_found = True
break
if not adc_found:
print(" NULL, // {:d}".format(channel))
print("};")
def print_header(self, hdr_filename):
with open(hdr_filename, "wt") as hdr_file:
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
pin.print_header(hdr_file)
hdr_file.write("extern const pin_obj_t * const pin_adc1[];\n")
hdr_file.write("extern const pin_obj_t * const pin_adc2[];\n")
hdr_file.write("extern const pin_obj_t * const pin_adc3[];\n")
def print_qstr(self, qstr_filename):
with open(qstr_filename, "wt") as qstr_file:
qstr_set = set([])
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
qstr_set |= set(pin.qstr_list())
qstr_set |= set([named_pin.name()])
for named_pin in self.board_pins:
qstr_set |= set([named_pin.name()])
for qstr in sorted(qstr_set):
print("Q({})".format(qstr), file=qstr_file)
def print_af_hdr(self, af_const_filename):
with open(af_const_filename, "wt") as af_const_file:
af_hdr_set = set([])
mux_name_width = 0
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
for af in pin.alt_fn:
if af.is_supported():
mux_name = af.mux_name()
af_hdr_set |= set([mux_name])
if len(mux_name) > mux_name_width:
mux_name_width = len(mux_name)
for mux_name in sorted(af_hdr_set):
key = "MP_ROM_QSTR(MP_QSTR_{}),".format(mux_name)
val = "MP_ROM_INT(GPIO_{})".format(mux_name)
print(" { %-*s %s }," % (mux_name_width + 26, key, val), file=af_const_file)
def print_af_py(self, af_py_filename):
with open(af_py_filename, "wt") as af_py_file:
print("PINS_AF = (", file=af_py_file)
for named_pin in self.board_pins:
print(" ('%s', " % named_pin.name(), end="", file=af_py_file)
for af in named_pin.pin().alt_fn:
if af.is_supported():
print("(%d, '%s'), " % (af.idx, af.af_str), end="", file=af_py_file)
print("),", file=af_py_file)
print(")", file=af_py_file)
def main():
parser = argparse.ArgumentParser(
prog="make-pins.py",
usage="%(prog)s [options] [command]",
description="Generate board specific pin file",
)
parser.add_argument(
"-a",
"--af",
dest="af_filename",
help="Specifies the alternate function file for the chip",
default="nrf.csv",
)
parser.add_argument(
"--af-const",
dest="af_const_filename",
help="Specifies header file for alternate function constants.",
default="build/pins_af_const.h",
)
parser.add_argument(
"--af-py",
dest="af_py_filename",
help="Specifies the filename for the python alternate function mappings.",
default="build/pins_af.py",
)
parser.add_argument(
"-b", "--board", dest="board_filename", help="Specifies the board file",
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix_filename",
help="Specifies beginning portion of generated pins file",
default="nrf52_prefix.c",
)
parser.add_argument(
"-q",
"--qstr",
dest="qstr_filename",
help="Specifies name of generated qstr header file",
default="build/pins_qstr.h",
)
parser.add_argument(
"-r",
"--hdr",
dest="hdr_filename",
help="Specifies name of generated pin header file",
default="build/pins.h",
)
args = parser.parse_args(sys.argv[1:])
pins = Pins()
print("// This file was automatically generated by make-pins.py")
print("//")
if args.af_filename:
print("// --af {:s}".format(args.af_filename))
pins.parse_af_file(args.af_filename, 1, 2, 2)
if args.board_filename:
print("// --board {:s}".format(args.board_filename))
pins.parse_board_file(args.board_filename)
if args.prefix_filename:
print("// --prefix {:s}".format(args.prefix_filename))
print("")
with open(args.prefix_filename, "r") as prefix_file:
print(prefix_file.read())
pins.print_const_table()
pins.print()
pins.print_header(args.hdr_filename)
pins.print_qstr(args.qstr_filename)
pins.print_af_hdr(args.af_const_filename)
pins.print_af_py(args.af_py_filename)
if __name__ == "__main__":
main()
| mit | -2,772,880,457,850,103,300 | 31.880734 | 106 | 0.495536 | false |
PolicyStat/django | tests/i18n/patterns/tests.py | 27 | 13685 | from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, clear_url_caches, set_script_prefix
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.test import TestCase, override_settings
from django.template import Template, Context
from django.utils._os import upath
from django.utils import translation
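# Used by URLRedirectTests.test_custom_redirect_class to verify that
# LocaleMiddleware honours a subclass's response_redirect_class (301 vs 302).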
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
USE_I18N=True,
LOCALE_PATHS=(
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
LANGUAGE_CODE='en-us',
LANGUAGES=(
('nl', 'Dutch'),
('en', 'English'),
('pt-br', 'Brazilian Portuguese'),
),
MIDDLEWARE_CLASSES=(
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
),
ROOT_URLCONF='i18n.patterns.urls.default',
)
class URLTestCaseBase(TestCase):
"""
TestCase base-class for the URL tests.
"""
def setUp(self):
# Make sure the cache is empty before we are doing our tests.
clear_url_caches()
def tearDown(self):
# Make sure we will leave an empty cache for other testcases.
clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
"""
Tests if the `i18n_patterns` is adding the prefix correctly.
"""
def test_not_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
with translation.override('nl'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
def test_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/en/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/nl/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
def test_invalid_prefix_use(self):
self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
@override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled')
class URLDisabledTests(URLTestCaseBase):
@override_settings(USE_I18N=False)
def test_prefixed_i18n_disabled(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
class PathUnusedTests(URLTestCaseBase):
"""
Check that if no i18n_patterns is used in root urlconfs, then no
language activation happens based on url prefix.
"""
def test_no_lang_activate(self):
response = self.client.get('/nl/foo/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
"""
Tests if the pattern-strings are translated correctly (within the
`i18n_patterns` and the normal `patterns` function).
"""
def test_no_prefix_translated(self):
with translation.override('en'):
self.assertEqual(reverse('no-prefix-translated'), '/translated/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')
with translation.override('nl'):
self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')
with translation.override('pt-br'):
self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')
def test_users_url(self):
with translation.override('en'):
self.assertEqual(reverse('users'), '/en/users/')
with translation.override('nl'):
self.assertEqual(reverse('users'), '/nl/gebruikers/')
self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
with translation.override('pt-br'):
self.assertEqual(reverse('users'), '/pt-br/usuarios/')
class URLNamespaceTests(URLTestCaseBase):
"""
Tests if the translations are still working within namespaces.
"""
def test_account_register(self):
with translation.override('en'):
self.assertEqual(reverse('account:register'), '/en/account/register/')
with translation.override('nl'):
self.assertEqual(reverse('account:register'), '/nl/profiel/registeren/')
class URLRedirectTests(URLTestCaseBase):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_en_redirect_wrong_url(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
def test_nl_redirect(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertRedirects(response, '/nl/profiel/registeren/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_nl_redirect_wrong_url(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertEqual(response.status_code, 404)
def test_pt_br_redirect(self):
response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
self.assertRedirects(response, '/pt-br/conta/registre-se/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_pl_pl_redirect(self):
# language from outside of the supported LANGUAGES list
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
@override_settings(
MIDDLEWARE_CLASSES=(
'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
'django.middleware.common.CommonMiddleware',
),
)
def test_custom_redirect_class(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
"""
Tests that 'Accept-Language' is not added to the Vary header when using
prefixed URLs.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Vary'), 'Accept-Language')
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
self.assertFalse(response.get('Vary'))
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=True`).
"""
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/not-prefixed/', 301)
def test_en_redirect(self):
response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
# target status code of 301 because of CommonMiddleware redirecting
self.assertIn(('http://testserver/en/account/register/', 301), response.redirect_chain)
self.assertRedirects(response, '/en/account/register/', 302)
response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=False`).
"""
@override_settings(APPEND_SLASH=False)
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=False)
def test_en_redirect(self):
response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register-without-slash', 302)
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
"""
Tests if the response has the right language-code.
"""
def test_not_prefixed_with_prefix(self):
response = self.client.get('/en/not-prefixed/')
self.assertEqual(response.status_code, 404)
def test_en_url(self):
response = self.client.get('/en/account/register/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
def test_nl_url(self):
response = self.client.get('/nl/profiel/registeren/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'nl')
self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
def test_wrong_en_prefix(self):
response = self.client.get('/en/profiel/registeren/')
self.assertEqual(response.status_code, 404)
def test_wrong_nl_prefix(self):
response = self.client.get('/nl/account/register/')
self.assertEqual(response.status_code, 404)
def test_pt_br_url(self):
response = self.client.get('/pt-br/conta/registre-se/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'pt-br')
self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
"""
#21579 - LocaleMiddleware should respect the script prefix.
"""
def setUp(self):
super(URLRedirectWithScriptAliasTests, self).setUp()
self.script_prefix = '/script_prefix'
set_script_prefix(self.script_prefix)
def tearDown(self):
super(URLRedirectWithScriptAliasTests, self).tearDown()
# reset script prefix
set_script_prefix('')
def test_language_prefix_with_script_prefix(self):
response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=self.script_prefix)
self.assertRedirects(response, '%s/en/prefixed/' % self.script_prefix, target_status_code=404)
class URLTagTests(URLTestCaseBase):
"""
Test if the language tag works.
"""
def test_strings_only(self):
t = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(t.render(Context({})).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_context(self):
ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
tpl = Template("""{% load i18n %}
{% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(tpl.render(ctx).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_args(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
def test_kwargs(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
| bsd-3-clause | -8,116,618,428,998,594,000 | 38.897959 | 112 | 0.65232 | false |
Caseyftw/astronet | astronet/util/process.py | 1 | 1274 | '''
Created on 26.07.2016
@author: fgieseke
'''
import multiprocessing
def _wrapped_task(proc_num, task, args, kwargs, return_dict):
return_dict[proc_num] = task(*args, **kwargs)
def start_via_single_process(task, args, kwargs):
manager = multiprocessing.Manager()
return_dict = manager.dict()
proc_num = 0
proc = multiprocessing.Process(target=_wrapped_task, args=(proc_num, task, args, kwargs, return_dict))
print "proc=", proc
proc.daemon = False
proc.start()
proc.join()
return return_dict[proc_num]
def perform_task_in_parallel(task, params_parallel, n_jobs=1):
""" Performas a task in parallel
Parameters
----------
task : callable
The function/procedure that shall be executed
params_parallel : list
The parallel parameters
n_jobs : int, default 1
The number of jobs that shall be used
"""
if n_jobs == 1:
results = []
for params in params_parallel:
results.append(start_via_single_process(task, [params], {}))
return results
pool = multiprocessing.Pool(n_jobs)
results = pool.map(task, params_parallel)
pool.close()
pool.join()
    return results
 | gpl-2.0 | 5,518,267,185,580,809,000 | 22.181818 | 106 | 0.60832 | false |
PaulWay/insights-core | insights/parsers/dumpe2fs_h.py | 1 | 2755 | """
DumpE2FS - Command ``dumpe2fs -h``
==================================
This parser handles ``dumpe2fs`` output.
The object provides access to this data using a dictionary. Particular keys
are stored as lists:
* Filesystem features
* Filesystem flags
* Default mount options
Other keys are stored as strings. The name of the device is stored in the
``dev_name`` property.
Typical contents of the ``/sbin/dumpe2fs -h /dev/device`` command::
dumpe2fs 1.41.12 (17-May-2010)
Filesystem volume name: <none>
Last mounted on: /usr
Filesystem UUID: 1b332c5d-2410-4934-9118-466f8a14841f
Filesystem magic number: 0xEF53
Filesystem revision #: 1 (dynamic)
Filesystem features: has_journal ext_attr resize_inode dir_index filetype needs_recovery extent flex_bg sparse_super large_file huge_file uninit_bg dir_nlink extra_isize
Filesystem flags: signed_directory_hash
Default mount options: user_xattr acl
Examples:
>>> e2fs = shared[DumpE2fs]
>>> e2fs.dev_name
'/dev/device'
>>> e2fs.data['Filesystem volume name']
'<none>'
>>> e2fs.data['Last mounted on']
'/usr'
>>> e2fs.data['Filesystem UUID']
'1b332c5d-2410-4934-9118-466f8a14841f'
>>> e2fs.data['Filesystem magic number']
'0xEF53'
>>> e2fs.data['Filesystem revision #']
'1 (dynamic)'
>>> e2fs.data['Filesystem features']
['has_journal', 'ext_attr', 'resize_inode', 'dir_index', 'filetype',
'needs_recovery', 'extent', 'flex_bg', 'sparse_super', 'large_file',
'huge_file', 'uninit_bg', 'dir_nlink', 'extra_isize'],
>>> e2fs.data['Filesystem flags']
['signed_directory_hash']
>>> e2fs.data['Default mount options']
['user_xattr', 'acl']
"""
from .. import parser, Parser
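# Keys whose values are split on whitespace into lists rather than stored as
# single strings.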
COMPOUND_FIELDS = ['Filesystem features', 'Filesystem flags', 'Default mount options']
@parser('dumpe2fs-h')
class DumpE2fs(Parser):
"""
Parse each line in the output of the ``dumpe2fs`` command.
"""
def parse_content(self, content):
dumpe2fs_values_dict = {}
for line in content:
if ":" in line:
key, value = line.split(":", 1)
if key in COMPOUND_FIELDS:
dumpe2fs_values_dict[key] = list(value.strip().split())
else:
dumpe2fs_values_dict[key] = value.strip()
self.dev_name = self.file_name.split('dumpe2fs_-h_')[-1].replace('.', '/')
self.data = dumpe2fs_values_dict
@parser('dumpe2fs-h')
def get_dumpe2fs_output(context):
"""
Deprecated, use DumpE2fs instead.
Also provides the data in the old device: data dictionary format.
"""
obj = DumpE2fs(context)
return {obj.dev_name: obj.data}
| apache-2.0 | -8,473,828,247,939,801,000 | 30.666667 | 178 | 0.621779 | false |
namecoin/namecoin-core | test/functional/test_framework/blocktools.py | 1 | 9711 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from binascii import a2b_hex
import struct
import time
import unittest
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
hash256,
hex_str_to_bytes,
ser_uint256,
sha256,
tx_from_hex,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR
# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602
# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
"""Create a block (with regtest difficulty)."""
block = CBlock()
if tmpl is None:
tmpl = {}
if version:
block.set_base_version(version)
elif tmpl.get('version'):
block.nVersion = tmpl.get('version')
else:
block.set_base_version(1)
block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
if tmpl and not tmpl.get('bits') is None:
block.nBits = struct.unpack('>I', a2b_hex(tmpl['bits']))[0]
else:
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
if coinbase is None:
coinbase = create_coinbase(height=tmpl['height'])
block.vtx.append(coinbase)
if txlist:
for tx in txlist:
if not hasattr(tx, 'calc_sha256'):
tx = tx_from_hex(tx)
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def get_witness_script(witness_root, witness_nonce):
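    """Return the OP_RETURN scriptPubKey carrying the BIP141 witness commitment,
    i.e. hash256 of the witness merkle root and nonce prefixed by the commitment
    header."""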
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def script_BIP34_coinbase_height(height):
if height <= 16:
res = CScriptOp.encode_op_n(height)
# Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
return CScript([res, OP_1])
return CScript([CScriptNum(height)])
def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValue=50):
"""Create a coinbase transaction.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output.
If extra_output_script is given, make a 0-value output to that
script. This is useful to pad block weight/sigops as needed. """
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = nValue * COIN
if nValue == 50:
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.nValue += fees
if pubkey is not None:
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
if extra_output_script is not None:
coinbaseoutput2 = CTxOut()
coinbaseoutput2.nValue = 0
coinbaseoutput2.scriptPubKey = extra_output_script
coinbase.vout.append(coinbaseoutput2)
coinbase.calc_sha256()
return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert n < len(prevtx.vout)
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must have a wallet that can
sign for the output that is being spent.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = tx_from_hex(raw_tx)
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must have a wallet that can sign
for the output that is being spent.
"""
psbt = node.createpsbt(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
for _ in range(2):
for w in node.listwallets():
wrpc = node.get_wallet_rpc(w)
signed_psbt = wrpc.walletprocesspsbt(psbt)
psbt = signed_psbt['psbt']
final_psbt = node.finalizepsbt(psbt)
assert_equal(final_psbt["complete"], True)
return final_psbt['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
"""Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return pkscript.hex()
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransactionwithwallet(tx_to_witness)
assert "errors" not in signed or len(["errors"]) == 0
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = tx_from_hex(tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = tx.serialize().hex()
return node.sendrawtransaction(tx_to_witness)
class TestFrameworkBlockTools(unittest.TestCase):
def test_create_coinbase(self):
height = 20
coinbase_tx = create_coinbase(height=height)
assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height)
| mit | 8,934,684,380,001,036,000 | 36.35 | 108 | 0.679436 | false |
willprice/weboob | modules/allrecipes/pages.py | 6 | 4721 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import Recipe
from weboob.capabilities.base import NotAvailable, NotLoaded
from weboob.deprecated.browser import Page
class FourOFourPage(Page):
pass
class ResultsPage(Page):
""" Page which contains results as a list of recipies
"""
def iter_recipes(self):
for div in self.parser.select(self.document.getroot(), 'div.recipe-info'):
thumbnail_url = NotAvailable
short_description = NotAvailable
imgs = self.parser.select(div.getparent(), 'img')
if len(imgs) > 0:
url = unicode(imgs[0].attrib.get('src', ''))
if url.startswith('http://'):
thumbnail_url = url
link = self.parser.select(div, 'a.title', 1)
title = unicode(link.text)
id = unicode(link.attrib.get('href', '').split('/')[2])
recipe = Recipe(id, title)
recipe.thumbnail_url = thumbnail_url
recipe.short_description = short_description
recipe.instructions = NotLoaded
recipe.ingredients = NotLoaded
recipe.nb_person = NotLoaded
recipe.cooking_time = NotLoaded
recipe.preparation_time = NotLoaded
recipe.author = NotLoaded
yield recipe
class RecipePage(Page):
""" Page which contains a recipe
"""
def get_recipe(self, id):
title = NotAvailable
preparation_time = NotAvailable
cooking_time = NotAvailable
author = NotAvailable
nb_person = NotAvailable
ingredients = NotAvailable
picture_url = NotAvailable
instructions = NotAvailable
comments = NotAvailable
title = unicode(self.parser.select(self.document.getroot(), 'h1#itemTitle', 1).text)
imgillu = self.parser.select(self.document.getroot(), 'img#imgPhoto')
if len(imgillu) > 0:
picture_url = unicode(imgillu[0].attrib.get('src', ''))
ingredients = []
l_ing = self.parser.select(self.document.getroot(), 'li#liIngredient')
for ing in l_ing:
ingtxt = unicode(ing.text_content().strip())
if ingtxt != '':
ingredients.append(ingtxt)
instructions = u''
l_divinst = self.parser.select(self.document.getroot(), 'div.directLeft li')
num_instr = 1
for inst in l_divinst:
instructions += '%s: %s\n' % (num_instr, inst.text_content())
num_instr += 1
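        # Preparation and cooking times are split across hour/minute <em> spans;
        # fold each into a single number of minutes.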
prepmin = 0
emprep = self.parser.select(self.document.getroot(), 'span#prepHoursSpan em')
if len(emprep) > 0:
prepmin += int(emprep[0].text) * 60
emprep = self.parser.select(self.document.getroot(), 'span#prepMinsSpan em')
if len(emprep) > 0:
prepmin += int(emprep[0].text)
if prepmin != 0:
preparation_time = prepmin
cookmin = 0
emcooktime = self.parser.select(self.document.getroot(), 'span#cookHoursSpan em')
if len(emcooktime) > 0:
cookmin += int(emcooktime[0].text) * 60
emcooktime = self.parser.select(self.document.getroot(), 'span#cookMinsSpan em')
if len(emcooktime) > 0:
cookmin += int(emcooktime[0].text)
if cookmin != 0:
cooking_time = cookmin
l_nbpers = self.parser.select(self.document.getroot(), 'span#lblYield[itemprop=recipeYield]')
if len(l_nbpers) > 0 and 'servings' in l_nbpers[0].text:
nb_person = [int(l_nbpers[0].text.split()[0])]
recipe = Recipe(id, title)
recipe.preparation_time = preparation_time
recipe.cooking_time = cooking_time
recipe.nb_person = nb_person
recipe.ingredients = ingredients
recipe.instructions = instructions
recipe.picture_url = picture_url
recipe.comments = comments
recipe.author = author
recipe.thumbnail_url = NotLoaded
return recipe
| agpl-3.0 | -7,999,287,810,841,263,000 | 36.468254 | 101 | 0.617878 | false |
jinmm1992/moose | framework/scripts/cluster_launcher.py | 11 | 5999 | #!/usr/bin/env python
import os, sys, re, shutil
from optparse import OptionParser, OptionGroup, Values
# Get the real path of cluster_launcher
if(os.path.islink(sys.argv[0])):
pathname = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
pathname = os.path.dirname(sys.argv[0])
pathname = os.path.abspath(pathname)
# Add the utilities/python_getpot directory
MOOSE_DIR = os.path.abspath(os.path.join(pathname, '../../'))
FRAMEWORK_DIR = os.path.abspath(os.path.join(pathname, '../../', 'framework'))
#### See if MOOSE_DIR is already in the environment instead
if os.environ.has_key("MOOSE_DIR"):
MOOSE_DIR = os.environ['MOOSE_DIR']
FRAMEWORK_DIR = os.path.join(MOOSE_DIR, 'framework')
if os.environ.has_key("FRAMEWORK_DIR"):
FRAMEWORK_DIR = os.environ['FRAMEWORK_DIR']
# Import the TestHarness and Helper functions from the MOOSE toolkit
sys.path.append(os.path.join(MOOSE_DIR, 'python'))
import path_tool
path_tool.activate_module('TestHarness')
path_tool.activate_module('FactorySystem')
sys.path.append(os.path.join(FRAMEWORK_DIR, 'scripts', 'ClusterLauncher'))
from PBSJob import PBSJob
sys.path.append(os.path.join(MOOSE_DIR, 'python', 'FactorySystem'))
import ParseGetPot
from InputParameters import InputParameters
from Factory import Factory
# Default file to read if only a directory is supplied
job_list = 'job_list'
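# Build the next directory name "<file_name>_NNN", where NNN is one more than
# the largest zero-padded serial suffix already present in the given file list.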
def getNextDirName(file_name, files):
largest_serial_num = 0
for name in files:
m = re.search(file_name + '_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
return file_name + "_" + str(largest_serial_num+1).zfill(3)
class ClusterLauncher:
def __init__(self):
self.factory = Factory()
def parseJobsFile(self, template_dir, job_file):
jobs = []
# We expect the job list to be named "job_list"
filename = template_dir + job_file
try:
data = ParseGetPot.readInputFile(filename)
except: # ParseGetPot class
print "Parse Error: " + filename
return jobs
# We expect our root node to be called "Jobs"
if 'Jobs' in data.children:
jobs_node = data.children['Jobs']
# Get the active line
active_jobs = None
if 'active' in jobs_node.params:
active_jobs = jobs_node.params['active'].split(' ')
for jobname, job_node in jobs_node.children.iteritems():
# Make sure this job is active
if active_jobs != None and not jobname in active_jobs:
continue
# First retrieve the type so we can get the valid params
if 'type' not in job_node.params:
print "Type missing in " + filename
sys.exit(1)
params = self.factory.validParams(job_node.params['type'])
params['job_name'] = jobname
# Now update all the base level keys
params_parsed = set()
params_ignored = set()
for key, value in job_node.params.iteritems():
params_parsed.add(key)
if key in params:
if params.type(key) == list:
params[key] = value.split(' ')
else:
if re.match('".*"', value): # Strip quotes
params[key] = value[1:-1]
else:
params[key] = value
else:
params_ignored.add(key)
# Make sure that all required parameters are supplied
required_params_missing = params.required_keys() - params_parsed
if len(required_params_missing):
print 'Required Missing Parameter(s): ', required_params_missing
sys.exit(1)
if len(params_ignored):
print 'Ignored Parameter(s): ', params_ignored
jobs.append(params)
return jobs
def createAndLaunchJob(self, template_dir, job_file, specs, options):
next_dir = getNextDirName(specs['job_name'], os.listdir('.'))
os.mkdir(template_dir + next_dir)
# Log it
if options.message:
f = open(template_dir + 'jobs.log', 'a')
f.write(next_dir.ljust(20) + ': ' + options.message + '\n')
f.close()
saved_cwd = os.getcwd()
os.chdir(template_dir + next_dir)
# Turn the remaining work over to the Job instance
# To keep everything consistent we'll also append our serial number to our job name
specs['job_name'] = next_dir
job_instance = self.factory.create(specs['type'], specs['job_name'], specs)
# Copy files
job_instance.copyFiles(job_file)
# Prepare the Job Script
job_instance.prepareJobScript()
# Launch it!
job_instance.launch()
os.chdir(saved_cwd)
def registerJobType(self, type, name):
self.factory.register(type, name)
### Parameter Dump ###
def printDump(self):
self.factory.printDump("Jobs")
sys.exit(0)
def run(self, template_dir, job_file, options):
jobs = self.parseJobsFile(template_dir, job_file)
for job in jobs:
self.createAndLaunchJob(template_dir, job_file, job, options)
########################################################
def main():
parser = OptionParser(usage='Usage: %prog [options] <template directory>')
parser.add_option("--dump", action="store_true", dest="dump", default=False, help="Dump the parameters for the testers in GetPot Format")
parser.add_option("-m", action="store", dest="message", help="A message that will be stored in a local log file that describes the job")
(options, location) = parser.parse_args()
cluster_launcher = ClusterLauncher()
cluster_launcher.registerJobType(PBSJob, 'PBSJob')
if options.dump:
cluster_launcher.printDump()
if not location:
parser.print_help()
sys.exit(1)
# See if the user passed a file or a directory
abs_location = os.path.abspath(location[0])
if os.path.isdir(abs_location):
dir = abs_location
file = job_list
elif os.path.isfile(abs_location):
(dir, file) = os.path.split(abs_location)
dir = dir + '/'
# Launch it
cluster_launcher.run(dir, file, options)
if __name__ == '__main__':
main()
| lgpl-2.1 | -3,176,024,060,669,204,500 | 31.080214 | 139 | 0.645274 | false |